wangkun, 2 years ago
Parent
Commit
b7847096ec

+ 11 - 1
README.MD

@@ -10,7 +10,7 @@ ${crawler_dir}:     爬虫执行路径,如: ./youtube/youtube_main/run_youtube
 ${log_type}:        日志命名格式,如: follow,则在 youtube/logs/目录下,生成 2023-02-08-follow.log
 ${crawler}:         哪款爬虫,如: youtube / kanyikan / weixinzhishu
 ${strategy}:        爬虫策略,如: 定向爬虫策略 / 小时榜爬虫策略 / 热榜爬虫策略
-${oss_endpoint}:    OSS网关,内网: inner / 外网: out / 香港: hk
+# ${oss_endpoint}:    OSS网关,内网: inner / 外网: out / 香港: hk
 ${env}:             爬虫运行环境,正式环境: prod / 测试环境: dev
 ${machine}:         爬虫运行机器,阿里云服务器: aliyun_hk / aliyun / macpro / macair / local
 ${nohup_dir}:       nohup日志存储路径,如: ./youtube/nohup.log
@@ -19,6 +19,7 @@ ${nohup_dir}:       nohup日志存储路径,如: ./youtube/nohup.log
 #### YouTube
 ```
 sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="follow" --crawler="youtube" --strategy="定向爬虫策略" --oss_endpoint="hk" --env="prod" --machine="aliyun_hk" youtube/nohup.log
+# sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="follow" --crawler="youtube" --strategy="定向爬虫策略" --env="prod" --machine="aliyun_hk" youtube/nohup.log
 youtube杀进程命令: 
 ps aux | grep run_youtube
 ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -27,6 +28,9 @@ ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
 #### 微信指数
 ```
 微信指数杀进程
+nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py >>./weixinzhishu/nohup_inner_sort.log 2>&1 &
+nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py >>./weixinzhishu/nohup_inner_long.log 2>&1 &
+nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_out.py >>./weixinzhishu/nohup_out.log 2>&1 &
 ps aux | grep run_weixinzhishu
 ps aux | grep run_weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
@@ -35,10 +39,13 @@ ps aux | grep run_weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 阿里云 102 服务器
 sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
+# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="aliyun" xigua/nohup.log
 本机
 sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
+# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="local" xigua/nohup.log
 macpro
 sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="macpro" xigua/nohup.log
+# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="macpro" xigua/nohup.log
 杀进程命令:
 ps aux | grep run_xigua
 ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -48,10 +55,13 @@ ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 阿里云 102 服务器
 sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/nohup.log
+# sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --env="prod" --machine="aliyun" kuaishou/nohup.log
 本机
 sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local" kuaishou/nohup.log
+# sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --env="dev" --machine="local" kuaishou/nohup.log
 macpro
 sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="macpro" kuaishou/nohup.log
+# sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --env="prod" --machine="macpro" kuaishou/nohup.log
 杀进程命令:
 ps aux | grep run_kuaishou
 ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9

+ 35 - 0
common/db.py

@@ -4,6 +4,7 @@
 """
 数据库连接及操作
 """
+import redis
 import pymysql
 from common.common import Common
 
@@ -89,6 +90,40 @@ class MysqlHelper:
         # 关闭数据库连接
         connect.close()
 
+class RedisHelper:
+    @classmethod
+    def connect_redis(cls, env, machine):
+        if machine == 'aliyun_hk':
+            redis_pool = redis.ConnectionPool(
+                host='r-bp154bpw97gptefiqk.redis.rds.aliyuncs.com',  # 内网地址
+                # host='r-bp154bpw97gptefiqkpd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        elif env == 'prod':
+            redis_pool = redis.ConnectionPool(
+                host='r-bp154bpw97gptefiqk.redis.rds.aliyuncs.com',  # 内网地址
+                # host='r-bp154bpw97gptefiqkpd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        else:
+            redis_pool = redis.ConnectionPool(
+                host='r-bp154bpw97gptefiqk.redis.rds.aliyuncs.com',  # 内网地址
+                # host='r-bp154bpw97gptefiqkpd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        return redis_conn
+
+
+
 if __name__ == "__main__":
     # sql_statement = f"INSERT INTO crawler_user ( user_id, out_user_id, out_user_name, out_avatar_url, platform, tag) " \
     #       f"VALUES ('6282398', 'out_uid_003', 'out_user_name', '', 'xiaoniangao', 'xiaoniangao_play')"

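The new `RedisHelper.connect_redis(env, machine)` returns a `redis.Redis` client backed by a connection pool; all three branches currently point at the same internal host and `db=2`. A minimal usage sketch (the key name below is purely illustrative, not from the commit):
```
from common.db import RedisHelper

redis_conn = RedisHelper.connect_redis(env="prod", machine="aliyun")
redis_conn.set("crawler:demo_key", "1", ex=60)  # illustrative key with a 60-second TTL
print(redis_conn.get("crawler:demo_key"))       # b'1'
```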
+ 789 - 0
kuaishou/kuaishou_follow/kuaishou_follow_scheduling.py

@@ -0,0 +1,789 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+import json
+
+import urllib3
+from requests.adapters import HTTPAdapter
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.users import Users
+from common.db import MysqlHelper
+from common.publish import Publish
+
+
+class Follow:
+    platform = "快手"
+    tag = "快手爬虫,定向爬虫策略"
+
+    @classmethod
+    def get_rule(cls, log_type, crawler, index):
+        try:
+            while True:
+                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
+                if rule_sheet is None:
+                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
+                    time.sleep(10)
+                    continue
+                if index == 1:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
+                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+                elif index == 2:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
+                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
+                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
+                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
+                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
+                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
+                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    @classmethod
+    def download_rule(cls, video_dict, rule_dict):
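+        # rule_dict values are comparison suffixes such as ">=1000" (operator + threshold); each is concatenated with the video metric and eval'd into a bool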
+        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
+                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
+                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
+                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
+                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
+                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
+            return True
+        else:
+            return False
+
+    # 过滤词库
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
+    # 万能标题
+    @classmethod
+    def random_title(cls, log_type, crawler):
+        try:
+            while True:
+                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
+                if random_title_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                random_title_list = []
+                for x in random_title_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            random_title_list.append(y)
+                return random.choice(random_title_list)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
+
+    # 获取站外用户信息
+    @classmethod
+    def get_out_user_info(cls, log_type, crawler, out_uid):
+        try:
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfile",
+                "variables": {
+                    "userId": str(out_uid)
+                },
+                "query": "query visionProfile($userId: String) {\n  visionProfile(userId: $userId) {\n    result\n    hostName\n    userProfile {\n      ownerCount {\n        fan\n        photo\n        follow\n        photo_public\n        __typename\n      }\n      profile {\n        gender\n        user_name\n        user_id\n        headurl\n        user_text\n        user_profile_bg_url\n        __typename\n      }\n      isFollowing\n      __typename\n    }\n    __typename\n  }\n}\n"
+            })
+            headers = {
+                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
+                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
+                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                'content-type': 'application/json',
+                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                'Cache-Control': 'no-cache',
+                'Connection': 'keep-alive',
+                'Origin': 'https://www.kuaishou.com',
+                'Pragma': 'no-cache',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin',
+                'accept': '*/*',
+                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"'
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                              timeout=5)
+            response.close()
+            # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
+                return
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
+                return
+            elif 'visionProfile' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
+                return
+            elif 'userProfile' not in response.json()['data']['visionProfile']:
+                Common.logger(log_type, crawler).warning(
+                    f"get_out_user_info_response:{response.json()['data']['visionProfile']['userProfile']}\n")
+                return
+            else:
+                userProfile = response.json()['data']['visionProfile']['userProfile']
+                # Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
+
+                try:
+                    out_fans_str = str(userProfile['ownerCount']['fan'])
+                except Exception:
+                    out_fans_str = "0"
+
+                try:
+                    out_follow_str = str(userProfile['ownerCount']['follow'])
+                except Exception:
+                    out_follow_str = "0"
+
+                try:
+                    out_avatar_url = userProfile['profile']['headurl']
+                except Exception:
+                    out_avatar_url = ""
+
+                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
+                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
+                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
+
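+                # fan/follow counts may come back with a "万" (×10,000) suffix; normalise them to plain integers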
+                if "万" in out_fans_str:
+                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
+                else:
+                    out_fans = int(out_fans_str.replace(",", ""))
+                if "万" in out_follow_str:
+                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
+                else:
+                    out_follow = int(out_follow_str.replace(",", ""))
+
+                out_user_dict = {
+                    "out_fans": out_fans,
+                    "out_follow": out_follow,
+                    "out_avatar_url": out_avatar_url
+                }
+                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
+                return out_user_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
+
+    # 获取用户信息列表
+    @classmethod
+    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
+        try:
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                our_user_list = []
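+                # sheet layout (0-based): col 2 = out_uid, col 3 = user_name, col 6 = our_uid, col 7 = our_user_link; columns G:H are written back when a new in-platform user is created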
+                for i in range(1, len(user_sheet)):
+                    # for i in range(1, 2):
+                    out_uid = user_sheet[i][2]
+                    user_name = user_sheet[i][3]
+                    our_uid = user_sheet[i][6]
+                    our_user_link = user_sheet[i][7]
+                    if out_uid is None or user_name is None:
+                        Common.logger(log_type, crawler).info("空行\n")
+                    else:
+                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                        if our_uid is None:
+                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                            out_user_dict = {
+                                "out_uid": out_uid,
+                                "user_name": user_name,
+                                "out_avatar_url": out_user_info["out_avatar_url"],
+                                "out_create_time": '',
+                                "out_tag": '',
+                                "out_play_cnt": 0,
+                                "out_fans": out_user_info["out_fans"],
+                                "out_follow": out_user_info["out_follow"],
+                                "out_friend": 0,
+                                "out_like": 0,
+                                "platform": cls.platform,
+                                "tag": cls.tag,
+                            }
+                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
+                                                              out_user_dict=out_user_dict, env=env, machine=machine)
+                            our_uid = our_user_dict['our_uid']
+                            our_user_link = our_user_dict['our_user_link']
+                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                                 [[our_uid, our_user_link]])
+                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                            our_user_list.append(our_user_dict)
+                        else:
+                            our_user_dict = {
+                                'out_uid': out_uid,
+                                'user_name': user_name,
+                                'our_uid': our_uid,
+                                'our_user_link': our_user_link,
+                            }
+                            our_user_list.append(our_user_dict)
+                return our_user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
+
+    # 处理视频标题
+    @classmethod
+    def video_title(cls, log_type, crawler, title):
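+        # strip hashtag/@ segments and filename-unsafe characters, cap at 40 chars; fall back to a random title if nothing usable is left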
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "")[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return cls.random_title(log_type, crawler)
+        else:
+            return video_title
+
+    @classmethod
+    def get_cookie(cls, log_type, crawler, out_uid, machine):
+        try:
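+            # approach: load the profile page in headless Chrome and pull the Cookie header sent to www.kuaishou.com out of the performance log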
+            # 打印请求配置
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # 不打开浏览器运行
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(
+                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # driver初始化
+            if machine == "aliyun":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+            elif machine == "macpro":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v107/chromedriver'))
+            elif machine == "macair":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/piaoquan/Downloads/chromedriver_v108/chromedriver'))
+            else:
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v109/chromedriver'))
+
+            driver.implicitly_wait(10)
+            # print('打开个人主页')
+            driver.get(f'https://www.kuaishou.com/profile/{out_uid}')
+            time.sleep(1)
+
+            # print('解析cookies')
+            logs = driver.get_log("performance")
+            # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
+            # print('退出浏览器')
+            driver.quit()
+            for line in logs:
+                msg = json.loads(line['message'])
+                # Common.logger(log_type, crawler).info(f"{msg}\n\n")
+                if 'message' not in msg:
+                    pass
+                elif 'params' not in msg['message']:
+                    pass
+                elif 'headers' not in msg['message']['params']:
+                    pass
+                elif 'Cookie' not in msg['message']['params']['headers']:
+                    pass
+                elif msg['message']['params']['headers']['Host'] != 'www.kuaishou.com':
+                    pass
+                else:
+                    cookie = msg['message']['params']['headers']['Cookie']
+                    # Common.logger(log_type, crawler).info(f"{cookie}")
+                    return cookie
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_cookie:{e}\n")
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
+        try:
+            download_cnt_1, download_cnt_2 = 0, 0
+            pcursor = ""
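+            # pcursor is the pagination cursor returned by the API; it becomes "no_more" when the profile feed is exhausted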
+
+            while True:
+                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
+                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
+                if rule_dict_1 is None or rule_dict_2 is None:
+                    Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
+                    time.sleep(10)
+                else:
+                    break
+
+            while True:
+                if download_cnt_1 >= int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")) \
+                        and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                    Common.logger(log_type, crawler).info(
+                        f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
+                    return
+
+                url = "https://www.kuaishou.com/graphql"
+                payload = json.dumps({
+                    "operationName": "visionProfilePhotoList",
+                    "variables": {
+                        "userId": out_uid,
+                        "pcursor": pcursor,
+                        "page": "profile"
+                    },
+                    "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+                })
+                # get_cookie = cls.get_cookie(log_type, crawler, out_uid, machine)
+                # if get_cookie is None:
+                #     cookie = 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION'
+                # else:
+                #     cookie = get_cookie
+                # Common.logger(log_type, crawler).info(f"cookie:{cookie}")
+                headers = {
+                    # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
+                    # 'Cookie': cookie,
+                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
+                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                    'content-type': 'application/json',
+                    # 'accept': '*/*',
+                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                    # 'Cache-Control': 'no-cache',
+                    # 'Connection': 'keep-alive',
+                    # 'Origin': 'https://www.kuaishou.com',
+                    # 'Pragma': 'no-cache',
+                    # 'Sec-Fetch-Dest': 'empty',
+                    # 'Sec-Fetch-Mode': 'cors',
+                    # 'Sec-Fetch-Site': 'same-origin',
+                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                    # 'sec-ch-ua-mobile': '?0',
+                    # 'sec-ch-ua-platform': '"macOS"'
+                }
+                urllib3.disable_warnings()
+                s = requests.session()
+                # max_retries=3 重试3次
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                                  timeout=5)
+                response.close()
+                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                    return
+                elif 'data' not in response.json():
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                    return
+                elif 'visionProfilePhotoList' not in response.json()['data']:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                    return
+                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
+                    Common.logger(log_type, crawler).warning(
+                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
+                    return
+                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
+                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                    return
+                else:
+                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
+                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
+                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
+                    for i in range(len(feeds)):
+                        if 'photo' not in feeds[i]:
+                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                            break
+
+                        # video_title
+                        if 'caption' not in feeds[i]['photo']:
+                            video_title = cls.random_title(log_type, crawler)
+                        elif feeds[i]['photo']['caption'].strip() == "":
+                            video_title = cls.random_title(log_type, crawler)
+                        else:
+                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
+
+                        if 'videoResource' not in feeds[i]['photo'] \
+                                and 'manifest' not in feeds[i]['photo'] \
+                                and 'manifestH265' not in feeds[i]['photo']:
+                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                            break
+                        videoResource = feeds[i]['photo']['videoResource']
+
+                        if 'h264' not in videoResource and 'hevc' not in videoResource:
+                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                            break
+
+                        # video_id
+                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                            video_id = videoResource['h264']['videoId']
+                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                            video_id = videoResource['hevc']['videoId']
+                        else:
+                            video_id = ""
+
+                        # play_cnt
+                        if 'viewCount' not in feeds[i]['photo']:
+                            play_cnt = 0
+                        else:
+                            play_cnt = int(feeds[i]['photo']['viewCount'])
+
+                        # like_cnt
+                        if 'realLikeCount' not in feeds[i]['photo']:
+                            like_cnt = 0
+                        else:
+                            like_cnt = feeds[i]['photo']['realLikeCount']
+
+                        # publish_time
+                        if 'timestamp' not in feeds[i]['photo']:
+                            publish_time_stamp = 0
+                            publish_time_str = ''
+                            publish_time = 0
+                        else:
+                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
+
+                        # duration
+                        if 'duration' not in feeds[i]['photo']:
+                            duration = 0
+                        else:
+                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
+
+                        # video_width / video_height / video_url
+                        mapping = {}
+                        for item in ['width', 'height', 'url']:
+                            try:
+                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                try:
+                                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                                except Exception:
+                                    val = ''
+                            mapping[item] = val
+                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                        video_url = mapping['url']
+
+                        # cover_url
+                        if 'coverUrl' not in feeds[i]['photo']:
+                            cover_url = ""
+                        else:
+                            cover_url = feeds[i]['photo']['coverUrl']
+
+                        # user_name / avatar_url
+                        try:
+                            user_name = feeds[i]['author']['name']
+                            avatar_url = feeds[i]['author']['headerUrl']
+                        except Exception:
+                            user_name = ''
+                            avatar_url = ''
+
+                        video_dict = {'video_title': video_title,
+                                      'video_id': video_id,
+                                      'play_cnt': play_cnt,
+                                      'comment_cnt': 0,
+                                      'like_cnt': like_cnt,
+                                      'share_cnt': 0,
+                                      'video_width': video_width,
+                                      'video_height': video_height,
+                                      'duration': duration,
+                                      'publish_time': publish_time,
+                                      'publish_time_stamp': publish_time_stamp,
+                                      'publish_time_str': publish_time_str,
+                                      'user_name': user_name,
+                                      'user_id': out_uid,
+                                      'avatar_url': avatar_url,
+                                      'cover_url': cover_url,
+                                      'video_url': video_url,
+                                      'session': f"kuaishou{int(time.time())}"}
+
+                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
+                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
+
+                        Common.logger(log_type, crawler).info(
+                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
+
+                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
+                        Common.logger(log_type, crawler).info(
+                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+
+                        if video_title == "" or video_url == "":
+                            Common.logger(log_type, crawler).info("无效视频\n")
+                            break
+                        elif rule_1 is True:
+                            if download_cnt_1 < int(
+                                    rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                                download_finished = cls.download_publish(log_type=log_type,
+                                                                         crawler=crawler,
+                                                                         strategy=strategy,
+                                                                         video_dict=video_dict,
+                                                                         rule_dict=rule_dict_1,
+                                                                         our_uid=our_uid,
+                                                                         oss_endpoint=oss_endpoint,
+                                                                         env=env,
+                                                                         machine=machine)
+                                if download_finished is True:
+                                    download_cnt_1 += 1
+                        elif rule_2 is True:
+                            if download_cnt_2 < int(
+                                    rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                                download_finished = cls.download_publish(log_type=log_type,
+                                                                         crawler=crawler,
+                                                                         strategy=strategy,
+                                                                         video_dict=video_dict,
+                                                                         rule_dict=rule_dict_2,
+                                                                         our_uid=our_uid,
+                                                                         oss_endpoint=oss_endpoint,
+                                                                         env=env,
+                                                                         machine=machine)
+                                if download_finished is True:
+                                    download_cnt_2 += 1
+                        else:
+                            Common.logger(log_type, crawler).info("不满足下载规则\n")
+                            # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+
+                    if pcursor == "no_more":
+                        Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
+                        return
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
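+        # dedupe: same platform + out_video_id, or same platform + identical title and publish_time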
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+        try:
+            download_finished = False
+            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
+                                video_dict['publish_time_str'], env, machine) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+                Common.logger(log_type, crawler).info('标题已中过滤词\n')
+            else:
+                # 下载视频
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                            f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
+                    Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return download_finished
+                # 下载封面
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
+                # 保存视频信息至txt
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # 上传视频
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return download_finished
+
+                # 视频信息保存数据库
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                        user_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        {our_uid},
+                                                        "{video_dict['user_id']}",
+                                                        "{cls.platform}",
+                                                        "定向爬虫策略",
+                                                        "{video_dict['video_id']}",
+                                                        "{video_dict['video_title']}",
+                                                        "{video_dict['cover_url']}",
+                                                        "{video_dict['video_url']}",
+                                                        {int(video_dict['duration'])},
+                                                        "{video_dict['publish_time_str']}",
+                                                        {int(video_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_dict['video_width'])},
+                                                        {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+                # 视频写入飞书
+                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[our_video_id,
+                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "定向榜",
+                           str(video_dict['video_id']),
+                           video_dict['video_title'],
+                           our_video_link,
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['video_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+                download_finished = True
+            return download_finished
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
+        for user in user_list:
+            out_uid = user["out_uid"]
+            user_name = user["user_name"]
+            our_uid = user["our_uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              our_uid=our_uid,
+                              out_uid=out_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env,
+                              machine=machine)
+            sleep_time = 120
+            Common.logger(log_type, crawler).info(f"休眠{sleep_time}秒\n")
+            time.sleep(sleep_time)
+
+
+if __name__ == "__main__":
+    # Follow.get_videoList(log_type="follow",
+    #                      crawler="kuaishou",
+    #                      strategy="定向爬虫策略",
+    #                      our_uid="6282431",
+    #                      out_uid="3xws7ydsnmp5mgq",
+    #                      oss_endpoint="out",
+    #                      env="dev",
+    #                      machine="local")
+    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
+    # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3xvp5w6twj77xeq", "local"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3xgh4ja9be3wcaw", "local"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3x5wgjhfc7tx8ue", "local"))
+    pass

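`download_rule` builds each check by concatenating a metric with its rule string and calling `eval`. A standalone illustration of that mechanism (the metrics and thresholds below are made up, not read from the Feishu sheet):
```
video_dict = {"play_cnt": 15000, "duration": 75}
rule_dict = {"play_cnt": ">=10000", "duration": ">=60"}  # operator + threshold strings
ok = all(eval(f"{video_dict[k]}{rule_dict[k]}") for k in rule_dict)
print(ok)  # True: 15000 >= 10000 and 75 >= 60
```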
+ 49 - 0
kuaishou/kuaishou_main/run_kuaishou_follow_scheduling.py

@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/27
+import argparse
+import os
+import sys
+# import time
+
+sys.path.append(os.getcwd())
+from common.common import Common
+# from common.feishu import Feishu
+from kuaishou.kuaishou_follow.kuaishou_follow_scheduling import Follow
+
+
+def main(log_type, crawler, strategy, oss_endpoint, env, machine):
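+    # one crawl round per invocation: break after success; exceptions are only logged (the Feishu alert is commented out)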
+    while True:
+        try:
+            Common.logger(log_type, crawler).info('开始抓取 快手 定向榜\n')
+            Follow.get_follow_videos(log_type=log_type,
+                                     crawler=crawler,
+                                     strategy=strategy,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env,
+                                     machine=machine)
+            Common.del_logs(log_type, crawler)
+            Common.logger(log_type, crawler).info('抓取完一轮\n')
+            break
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"快手定向榜异常,触发报警:{e}\n")
+            # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the CLI argument parser
+    parser.add_argument('--log_type', type=str)  # add each expected argument (type noted where needed)
+    parser.add_argument('--crawler')
+    parser.add_argument('--strategy')
+    parser.add_argument('--our_uid')
+    parser.add_argument('--oss_endpoint')
+    parser.add_argument('--env')
+    parser.add_argument('--machine')
+    args = parser.parse_args()  # values are supplied from the command line
+    # print(args)
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         strategy=args.strategy,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env,
+         machine=args.machine)

+ 2 - 3
requirements.txt

@@ -1,4 +1,3 @@
-Appium_Python_Client==2.8.1
 ffmpeg==1.4
 loguru==0.6.0
 oss2==2.15.0
@@ -7,7 +6,7 @@ PyMySQL==1.0.2
 requests==2.27.1
 selenium~=4.2.0
 urllib3==1.26.9
-emoji~=2.2.0
 Appium-Python-Client~=2.8.1
 atomac~=1.2.0
-lxml~=4.9.1
+lxml~=4.9.1
+redis~=4.5.1

+ 3 - 8
scheduling/crawler_scheduling.py

@@ -14,7 +14,7 @@ class Scheduling:
     # 任务列表
     task_list = []
 
-    # 读取 / 更新任务表
+    # 读取任务表
     @classmethod
     def get_task(cls, log_type, crawler, env, machine):
         get_sql = """ select * from crawler_task_1 """
@@ -67,7 +67,7 @@ class Scheduling:
 
     # 更新下次启动时间,调用时机:调度该 task_id 的任务时
     @classmethod
-    def update_task(cls, log_type, crawler, env, machine):
+    def update_task(cls, log_type, crawler, task_id, next_time, interval_piaoquan, env, machine):
         if interval_piaoquan > 0:
             new_next_time = next_time + interval_piaoquan
             update_sql = f""" UPDATE crawler_task_1 SET next_time={new_next_time} WHERE task_id={task_id} """
@@ -91,12 +91,7 @@ class Scheduling:
         if len(pre_task_list) == 0:
             Common.logger(log_type, crawler).info("暂无新任务\n")
         else:
-            for i in range(len(pre_task_list)):
-                task_id = pre_task_list[i]["task_id"]
-                task_name = pre_task_list[i]["task_name"]
-                next_time = pre_task_list[i]["next_time"]
-                interval_piaoquan = pre_task_list[i]["interval_piaoquan"]
-                spider_rule = pre_task_list[i]["spider_rule"]
+            for pre_task in pre_task_list:
 
                 if machine == "hk":
                     # 写入 redis

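For reference, a minimal sketch of how the reworked `Scheduling.update_task` signature in `scheduling/crawler_scheduling.py` might be called from the scheduling loop (the field names follow the `pre_task` rows shown above; the call site and values are illustrative, not part of this commit):

```
from scheduling.crawler_scheduling import Scheduling

for pre_task in pre_task_list:
    Scheduling.update_task(log_type=log_type,
                           crawler=crawler,
                           task_id=pre_task["task_id"],
                           next_time=pre_task["next_time"],
                           interval_piaoquan=pre_task["interval_piaoquan"],
                           env=env,
                           machine=machine)
```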
+ 10 - 8
weixinzhishu/weixinzhishu_main/search_key.py

@@ -45,8 +45,9 @@ class Searchkey:
             driver.quit()
             time.sleep(1)
             Common.logger(log_type, crawler).info('关闭微信指数')
-            weixinzhishu_driver = cls.close_weixinzhishu(log_type, crawler)
-            weixinzhishu_driver.find_elements(By.NAME, '关闭')[-1].click()
+            cls.close_weixinzhishu(log_type, crawler)
+            # weixinzhishu_driver = cls.close_weixinzhishu(log_type, crawler)
+            # weixinzhishu_driver.find_elements(By.NAME, '关闭')[-1].click()
         except Exception as e:
             Common.logger(log_type, crawler).error(f'start_wechat异常:{e}\n')
 
@@ -60,12 +61,13 @@ class Searchkey:
         try:
             new_driver = webdriver.Remote(command_executor='http://127.0.0.1:4723', desired_capabilities=new_caps)
             windowElement = new_driver.find_elements(By.NAME, app_name)
-            if len(windowElement) != 0:
-                newWindowHandle = hex(int(windowElement[0].get_attribute("NativeWindowHandle")))
-                app_caps = {"appTopLevelWindow": newWindowHandle}
-                app_driver = webdriver.Remote(command_executor='http://127.0.0.1:4723',
-                                              desired_capabilities=app_caps)
-                return app_driver
+            windowElement[-1].click()
+            # if len(windowElement) != 0:
+            #     newWindowHandle = hex(int(windowElement[0].get_attribute("NativeWindowHandle")))
+            #     app_caps = {"appTopLevelWindow": newWindowHandle}
+            #     app_driver = webdriver.Remote(command_executor='http://127.0.0.1:4723',
+            #                                   desired_capabilities=app_caps)
+            #     return app_driver
         except Exception as e:
             Common.logger(log_type, crawler).error(f"close_weixinzhishu异常:{e}\n")
 

+ 1026 - 0
xigua/xigua_follow/xigua_follow_scheduling.py

@@ -0,0 +1,1026 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/17
+import base64
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+from lxml import etree
+
+sys.path.append(os.getcwd())
+from common.db import MysqlHelper
+from common.users import Users
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+
+
+class Follow:
+    # 个人主页视频翻页参数
+    offset = 0
+
+    platform = "西瓜视频"
+    tag = "西瓜视频爬虫,定向爬虫策略"
+
+    @classmethod
+    def get_rule(cls, log_type, crawler):
+        try:
+            while True:
+                rule_sheet = Feishu.get_values_batch(log_type, crawler, "4kxd31")
+                if rule_sheet is None:
+                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
+                    time.sleep(10)
+                    continue
+                rule_dict = {
+                    "play_cnt": int(rule_sheet[1][2]),
+                    "comment_cnt": int(rule_sheet[2][2]),
+                    "like_cnt": int(rule_sheet[3][2]),
+                    "duration": int(rule_sheet[4][2]),
+                    "publish_time": int(rule_sheet[5][2]),
+                    "video_width": int(rule_sheet[6][2]),
+                    "video_height": int(rule_sheet[7][2]),
+                }
+                return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    # 下载规则
+    @classmethod
+    def download_rule(cls, video_info_dict, rule_dict):
+        return (video_info_dict['play_cnt'] >= rule_dict['play_cnt']
+                and video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']
+                and video_info_dict['like_cnt'] >= rule_dict['like_cnt']
+                and video_info_dict['duration'] >= rule_dict['duration']
+                and (video_info_dict['video_width'] >= rule_dict['video_width']
+                     or video_info_dict['video_height'] >= rule_dict['video_height']))
+
+    # 过滤词库
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'KGB4Hc')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
+    @classmethod
+    def get_out_user_info(cls, log_type, crawler, out_uid):
+        try:
+            headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                       'referer': f'https://www.ixigua.com/home/{out_uid}',
+                       'Cookie': f'ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; __ac_signature={cls.random_signature()}; MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; s_v_web_id=verify_lef4i99x_32SosrdH_Qrtk_4LJn_8S7q_fhu16xe3s8ZV; tt_scid=QLJjPuHf6wxVqu6IIq6gHiJXQpVrCwrdhjH2zpm7-E3ZniE1RXBcP6M8b41FJOdo41e1; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1677047013%7C5866a444e5ae10a9df8c11551db75010fb77b657f214ccf84e503fae8d313d09; msToken=PerXJcDdIsZ6zXkGITsftXX4mDaVaW21GuqtzSVdctH46oXXT2GcELIs9f0XW2hunRzP6KVHLZaYElRvNYflLKUXih7lC27XKxs3HjdZiXPK9NQaoKbLfA==; ixigua-a-s=1',}
+            url = f"https://www.ixigua.com/home/{out_uid}"
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), verify=False, timeout=5).text
+            html = etree.HTML(response)
+            out_follow_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[1]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_fans_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[2]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_like_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[3]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_avatar_url = f"""https:{html.xpath('//span[@class="component-avatar__inner"]//img/@src')[0]}"""
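+            # follow / fans / like counts may be rendered like "1.2万"; convert them to plain integers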
+            if "万" in out_follow_str:
+                out_follow = int(float(out_follow_str.split("万")[0])*10000)
+            else:
+                out_follow = int(out_follow_str.replace(",", ""))
+            if "万" in out_fans_str:
+                out_fans = int(float(out_fans_str.split("万")[0])*10000)
+            else:
+                out_fans = int(out_fans_str.replace(",", ""))
+            if "万" in out_like_str:
+                out_like = int(float(out_like_str.split("万")[0])*10000)
+            else:
+                out_like = int(out_like_str.replace(",", ""))
+            out_user_dict = {
+                "out_follow": out_follow,
+                "out_fans": out_fans,
+                "out_like": out_like,
+                "out_avatar_url": out_avatar_url,
+            }
+            # for k, v in out_user_dict.items():
+            #     print(f"{k}:{v}")
+            return out_user_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
+
+    # 获取用户信息(字典格式). 注意:部分 user_id 字符类型是 int / str
+    @classmethod
+    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
+        try:
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    continue
+                our_user_list = []
+                for i in range(1, len(user_sheet)):
+                    out_uid = user_sheet[i][2]
+                    user_name = user_sheet[i][3]
+                    our_uid = user_sheet[i][6]
+                    our_user_link = user_sheet[i][7]
+                    if out_uid is None or user_name is None:
+                        Common.logger(log_type, crawler).info("空行\n")
+                    else:
+                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                        if our_uid is None:
+                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                            out_user_dict = {
+                                "out_uid": out_uid,
+                                "user_name": user_name,
+                                "out_avatar_url": out_user_info["out_avatar_url"],
+                                "out_create_time": '',
+                                "out_tag": '',
+                                "out_play_cnt": 0,
+                                "out_fans": out_user_info["out_fans"],
+                                "out_follow": out_user_info["out_follow"],
+                                "out_friend": 0,
+                                "out_like": out_user_info["out_like"],
+                                "platform": cls.platform,
+                                "tag": cls.tag,
+                            }
+                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler, out_user_dict=out_user_dict, env=env, machine=machine)
+                            our_uid = our_user_dict['our_uid']
+                            our_user_link = our_user_dict['our_user_link']
+                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}', [[our_uid, our_user_link]])
+                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                            our_user_list.append(our_user_dict)
+                        else:
+                            our_user_dict = {
+                                'out_uid': out_uid,
+                                'user_name': user_name,
+                                'our_uid': our_uid,
+                                'our_user_link': our_user_link,
+                            }
+                            our_user_list.append(our_user_dict)
+                return our_user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_user_id_from_feishu异常:{e}\n')
+
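+    # build a pseudo-random signature string, used as the _signature request param and the __ac_signature cookie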
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_signature(cls, log_type, crawler, out_uid, machine):
+        try:
+            # 打印请求配置
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # 不打开浏览器运行
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("--headless")
+            chrome_options.add_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # driver初始化
+            if machine == 'aliyun' or machine == 'aliyun_hk':
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+            elif machine == 'macpro':
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v86/chromedriver'))
+            elif machine == 'macair':
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/piaoquan/Downloads/chromedriver'))
+            else:
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v110/chromedriver'))
+            driver.implicitly_wait(10)
+            driver.get(f'https://www.ixigua.com/home/{out_uid}/')
+            time.sleep(3)
+            data_src = driver.find_elements(By.XPATH, '//img[@class="tt-img BU-MagicImage tt-img-loaded"]')[1].get_attribute("data-src")
+            signature = data_src.split("x-signature=")[-1]
+            return signature
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_signature异常:{e}\n')
+
+    # 获取视频详情
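+    # tries dash_120fps / dash / normal in that order, keeping the highest available rendition
+    # (video_4 down to video_1) or falling back to dynamic_video; backup_url_1 values are base64-encoded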
+    @classmethod
+    def get_video_url(cls, log_type, crawler, gid):
+        try:
+            url = 'https://www.ixigua.com/api/mixVideo/information?'
+            headers = {
+                "accept-encoding": "gzip, deflate",
+                "accept-language": "zh-CN,zh-Hans;q=0.9",
+                "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15",
+                "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+            }
+            params = {
+                'mixId': gid,
+                'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                           'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+                'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+                '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                              'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+            }
+            cookies = {
+                'ixigua-a-s': '1',
+                'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                           'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+                'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                         '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+                'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+                'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+                '__ac_nonce': '06304878000964fdad287',
+                '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                                  'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+                'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+                '_tea_utm_cache_1300': 'undefined',
+                'support_avif': 'false',
+                'support_webp': 'false',
+                'xiguavideopcwebid': '7134967546256016900',
+                'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+            response.close()
+            if 'data' not in response.json() or response.json()['data'] == '':
+                Common.logger(log_type, crawler).warning('get_video_url: response: {}', response)
+            else:
+                video_info = response.json()['data']['gidInformation']['packerData']['video']
+                video_url_dict = {}
+                # video_url
+                if 'videoResource' not in video_info:
+                    video_url_dict["video_url"] = ''
+                    video_url_dict["audio_url"] = ''
+                    video_url_dict["video_width"] = 0
+                    video_url_dict["video_height"] = 0
+
+                elif 'dash_120fps' in video_info['videoResource']:
+                    if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in video_info['videoResource']['dash_120fps']['video_list']:
+                        video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vwidth']
+                        video_height = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in video_info['videoResource']['dash_120fps']['video_list']:
+                        video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vwidth']
+                        video_height = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in video_info['videoResource']['dash_120fps']['video_list']:
+                        video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vwidth']
+                        video_height = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in video_info['videoResource']['dash_120fps']['video_list']:
+                        video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vwidth']
+                        video_height = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+
+                    elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
+                            and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                            and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                            and len(video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                            and len(video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                        video_url = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['backup_url_1']
+                        audio_url = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1]['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['vwidth']
+                        video_height = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    else:
+                        video_url_dict["video_url"] = ''
+                        video_url_dict["audio_url"] = ''
+                        video_url_dict["video_width"] = 0
+                        video_url_dict["video_height"] = 0
+
+                elif 'dash' in video_info['videoResource']:
+                    if "video_list" in video_info['videoResource']['dash'] and 'video_4' in video_info['videoResource']['dash']['video_list']:
+                        video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash']['video_list']['video_4']['vwidth']
+                        video_height = video_info['videoResource']['dash']['video_list']['video_4']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in video_info['videoResource']['dash']['video_list']:
+                        video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash']['video_list']['video_3']['vwidth']
+                        video_height = video_info['videoResource']['dash']['video_list']['video_3']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in video_info['videoResource']['dash']['video_list']:
+                        video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash']['video_list']['video_2']['vwidth']
+                        video_height = video_info['videoResource']['dash']['video_list']['video_2']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in video_info['videoResource']['dash']['video_list']:
+                        video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash']['video_list']['video_1']['vwidth']
+                        video_height = video_info['videoResource']['dash']['video_list']['video_1']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+
+                    elif 'dynamic_video' in video_info['videoResource']['dash'] \
+                            and 'dynamic_video_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                            and 'dynamic_audio_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                            and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
+                            and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                        video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1]['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['vwidth']
+                        video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    else:
+                        video_url_dict["video_url"] = ''
+                        video_url_dict["audio_url"] = ''
+                        video_url_dict["video_width"] = 0
+                        video_url_dict["video_height"] = 0
+
+                elif 'normal' in video_info['videoResource']:
+                    if "video_list" in video_info['videoResource']['normal'] and 'video_4' in \
+                            video_info['videoResource']['normal']['video_list']:
+                        video_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                        audio_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['normal']['video_list']['video_4']['vwidth']
+                        video_height = video_info['videoResource']['normal']['video_list']['video_4']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['normal'] and 'video_3' in \
+                            video_info['videoResource']['normal']['video_list']:
+                        video_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                        audio_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['normal']['video_list']['video_3']['vwidth']
+                        video_height = video_info['videoResource']['normal']['video_list']['video_3']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['normal'] and 'video_2' in \
+                            video_info['videoResource']['normal']['video_list']:
+                        video_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                        audio_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['normal']['video_list']['video_2']['vwidth']
+                        video_height = video_info['videoResource']['normal']['video_list']['video_2']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    elif "video_list" in video_info['videoResource']['normal'] and 'video_1' in \
+                            video_info['videoResource']['normal']['video_list']:
+                        video_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                        audio_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['normal']['video_list']['video_1']['vwidth']
+                        video_height = video_info['videoResource']['normal']['video_list']['video_1']['vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+
+                    elif 'dynamic_video' in video_info['videoResource']['normal'] \
+                            and 'dynamic_video_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                            and 'dynamic_audio_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                            and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list']) != 0 \
+                            and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                        video_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                            'backup_url_1']
+                        audio_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list'][-1][
+                            'backup_url_1']
+                        if len(video_url) % 3 == 1:
+                            video_url += '=='
+                        elif len(video_url) % 3 == 2:
+                            video_url += '='
+                        elif len(audio_url) % 3 == 1:
+                            audio_url += '=='
+                        elif len(audio_url) % 3 == 2:
+                            audio_url += '='
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                            'vwidth']
+                        video_height = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                            'vheight']
+                        video_url_dict["video_url"] = video_url
+                        video_url_dict["audio_url"] = audio_url
+                        video_url_dict["video_width"] = video_width
+                        video_url_dict["video_height"] = video_height
+                    else:
+                        video_url_dict["video_url"] = ''
+                        video_url_dict["audio_url"] = ''
+                        video_url_dict["video_width"] = 0
+                        video_url_dict["video_height"] = 0
+
+                else:
+                    video_url_dict["video_url"] = ''
+                    video_url_dict["audio_url"] = ''
+                    video_url_dict["video_width"] = 0
+                    video_url_dict["video_height"] = 0
+
+                return video_url_dict
+
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_video_url:{e}\n')
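+
+    # Note: every branch of get_video_url above repeats the same pad-and-base64-decode step on
+    # backup_url_1. A hypothetical helper (not wired into this file) could centralise it, e.g.:
+    #     def decode_backup_url(encoded):
+    #         encoded += '=' * (-len(encoded) % 4)  # standard base64 padding works on len % 4
+    #         return base64.b64decode(encoded).decode('utf8')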
+
+    @classmethod
+    def get_videolist(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
+        try:
+            signature = cls.random_signature()
+            while True:
+                url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
+                params = {
+                    'to_user_id': str(out_uid),
+                    'offset': str(cls.offset),
+                    'limit': '30',
+                    'maxBehotTime': '0',
+                    'order': 'new',
+                    'isHome': '0',
+                    # 'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
+                    # 'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
+                    '_signature': signature,
+                }
+                headers = {
+                    # 'authority': 'www.ixigua.com',
+                    # 'accept': 'application/json, text/plain, */*',
+                    # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                    # 'cache-control': 'no-cache',
+                    # 'cookie': f'MONITOR_WEB_ID=7168304743566296612; __ac_signature={signature}; ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; msToken=G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==; tt_scid=o4agqz7u9SKPwfBoPt6S82Cw0q.9KDtqmNe0JHxMqmpxNHQWq1BmrQdgVU6jEoX7ed99; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1676618894%7Cee5ad95378275f282f230a7ffa9947ae7eff40d0829c5a2568672a6dc90a1c96; ixigua-a-s=1',
+                    # 'pragma': 'no-cache',
+                    'referer': f'https://www.ixigua.com/home/{out_uid}/video/?preActiveKey=hotsoon&list_entrance=userdetail',
+                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                    # 'sec-ch-ua-mobile': '?0',
+                    # 'sec-ch-ua-platform': '"macOS"',
+                    # 'sec-fetch-dest': 'empty',
+                    # 'sec-fetch-mode': 'cors',
+                    # 'sec-fetch-site': 'same-origin',
+                    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                    # 'x-secsdk-csrf-token': '00010000000119e3f9454d1dcbb288704cda1960f241e2d19bd21f2fd283520c3615a990ac5a17448bfbb902a249'
+                }
+                urllib3.disable_warnings()
+                s = requests.session()
+                # max_retries=3 重试3次
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+                response.close()
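+                # page through the author's videos 30 at a time; cls.offset is reset to 0 before each early return below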
+                cls.offset += 30
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                    cls.offset = 0
+                    return
+                elif 'data' not in response.text:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                    cls.offset = 0
+                    return
+                elif 'videoList' not in response.json()["data"]:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
+                    cls.offset = 0
+                    return
+                else:
+                    videoList = response.json()['data']['videoList']
+                    for i in range(len(videoList)):
+                        # video_title
+                        if 'title' not in videoList[i]:
+                            video_title = 0
+                        else:
+                            video_title = videoList[i]['title'].strip().replace('手游', '') \
+                                .replace('/', '').replace('\n', '')
+
+                        # video_id
+                        if 'video_id' not in videoList[i]:
+                            video_id = 0
+                        else:
+                            video_id = videoList[i]['video_id']
+
+                        # gid
+                        if 'gid' not in videoList[i]:
+                            gid = 0
+                        else:
+                            gid = videoList[i]['gid']
+
+                        # play_cnt
+                        if 'video_detail_info' not in videoList[i]:
+                            play_cnt = 0
+                        elif 'video_watch_count' not in videoList[i]['video_detail_info']:
+                            play_cnt = 0
+                        else:
+                            play_cnt = videoList[i]['video_detail_info']['video_watch_count']
+
+                        # comment_cnt
+                        if 'comment_count' not in videoList[i]:
+                            comment_cnt = 0
+                        else:
+                            comment_cnt = videoList[i]['comment_count']
+
+                        # like_cnt
+                        if 'digg_count' not in videoList[i]:
+                            like_cnt = 0
+                        else:
+                            like_cnt = videoList[i]['digg_count']
+
+                        # share_cnt
+                        share_cnt = 0
+
+                        # video_duration
+                        if 'video_duration' not in videoList[i]:
+                            video_duration = 0
+                        else:
+                            video_duration = int(videoList[i]['video_duration'])
+
+                        # send_time
+                        if 'publish_time' not in videoList[i]:
+                            publish_time = 0
+                        else:
+                            publish_time = videoList[i]['publish_time']
+
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
+
+                        # is_top
+                        if 'is_top' not in videoList[i]:
+                            is_top = 0
+                        else:
+                            is_top = videoList[i]['is_top']
+
+                        # user_name
+                        if 'user_info' not in videoList[i]:
+                            user_name = 0
+                        elif 'name' not in videoList[i]['user_info']:
+                            user_name = 0
+                        else:
+                            user_name = videoList[i]['user_info']['name']
+
+                        # user_id
+                        if 'user_info' not in videoList[i]:
+                            user_id = 0
+                        elif 'user_id' not in videoList[i]['user_info']:
+                            user_id = 0
+                        else:
+                            user_id = videoList[i]['user_info']['user_id']
+
+                        # avatar_url
+                        if 'user_info' not in videoList[i]:
+                            avatar_url = 0
+                        elif 'avatar_url' not in videoList[i]['user_info']:
+                            avatar_url = 0
+                        else:
+                            avatar_url = videoList[i]['user_info']['avatar_url']
+
+                        # cover_url
+                        if 'video_detail_info' not in videoList[i]:
+                            cover_url = 0
+                        elif 'detail_video_large_image' not in videoList[i]['video_detail_info']:
+                            cover_url = 0
+                        elif 'url' in videoList[i]['video_detail_info']['detail_video_large_image']:
+                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url']
+                        else:
+                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
+
+                        while True:
+                            rule_dict = cls.get_rule(log_type, crawler)
+                            if rule_dict is None:
+                                Common.logger(log_type, crawler).warning(f"rule_dict:{rule_dict}, 10秒后重试")
+                                time.sleep(10)
+                            else:
+                                break
+
+                        if gid == 0 or video_id == 0 or cover_url == 0:
+                            Common.logger(log_type, crawler).info('无效视频\n')
+                        elif is_top is True and int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
+                            Common.logger(log_type, crawler).info(f'置顶视频,且发布时间:{publish_time_str} 超过{rule_dict["publish_time"]}天\n')
+                        elif int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
+                            Common.logger(log_type, crawler).info(f'发布时间:{publish_time_str}超过{rule_dict["publish_time"]}天\n')
+                            cls.offset = 0
+                            return
+                        else:
+                            video_url_dict = cls.get_video_url(log_type, crawler, gid)
+                            video_url = video_url_dict["video_url"]
+                            audio_url = video_url_dict["audio_url"]
+                            video_width = video_url_dict["video_width"]
+                            video_height = video_url_dict["video_height"]
+
+                            video_dict = {'video_title': video_title,
+                                          'video_id': video_id,
+                                          'gid': gid,
+                                          'play_cnt': play_cnt,
+                                          'comment_cnt': comment_cnt,
+                                          'like_cnt': like_cnt,
+                                          'share_cnt': share_cnt,
+                                          'video_width': video_width,
+                                          'video_height': video_height,
+                                          'duration': video_duration,
+                                          'publish_time_stamp': publish_time,
+                                          'publish_time_str': publish_time_str,
+                                          'is_top': is_top,
+                                          'user_name': user_name,
+                                          'user_id': user_id,
+                                          'avatar_url': avatar_url,
+                                          'cover_url': cover_url,
+                                          'audio_url': audio_url,
+                                          'video_url': video_url,
+                                          'session': signature}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 strategy=strategy,
+                                                 our_uid=our_uid,
+                                                 oss_endpoint=oss_endpoint,
+                                                 env=env,
+                                                 machine=machine)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videolist:{e}\n")
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
+
+    # 下载 / 上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+        try:
+            if cls.download_rule(video_dict, rule_dict) is False:
+                Common.logger(log_type, crawler).info('不满足抓取规则\n')
+            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+                Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', '3Ul6wZ') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'QOWqMo') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已存在\n')
+            else:
+                # 下载视频
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
+                # 下载音频
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'], url=video_dict['audio_url'])
+                # 合成音视频
+                Common.video_compose(log_type=log_type, crawler=crawler, video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
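+                # Common.ffmpeg 探测合成后的视频文件信息;探测失败或 size 为 0 视为无效视频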
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
+                    Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return
+                # 下载封面
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+                # 保存视频信息至txt
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # 上传视频
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if our_video_id is None:
+                    # 上传失败:删除本地视频文件夹并返回
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return
+
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                # 视频写入飞书
+                Feishu.insert_columns(log_type, 'xigua', "e075e9", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "定向榜",
+                           video_dict['video_title'],
+                           str(video_dict['video_id']),
+                           our_video_link,
+                           video_dict['gid'],
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['video_url'],
+                           video_dict['audio_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, 'xigua', "e075e9", "F2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+                # 视频信息保存数据库
+                insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {our_uid},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "定向爬虫策略",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+
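+    # 定向抓取入口:从飞书工作表(sheetid="5tlTYB")读取用户列表,逐个抓取其主页视频;每个用户抓完后重置 offset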
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        try:
+            user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="5tlTYB", env=env, machine=machine)
+            for user in user_list:
+                out_uid = user["out_uid"]
+                user_name = user["user_name"]
+                our_uid = user["our_uid"]
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+                cls.get_videolist(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env,
+                                  machine=machine)
+                cls.offset = 0
+                time.sleep(1)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+
+
+if __name__ == '__main__':
+    # print(Follow.get_signature("follow", "xigua", "95420624045", "local"))
+    # Follow.get_videolist(log_type="follow",
+    #                      crawler="xigua",
+    #                      strategy="定向爬虫策略",
+    #                      our_uid="6267141",
+    #                      out_uid="95420624045",
+    #                      oss_endpoint="out",
+    #                      env="dev",
+    #                      machine="local")
+    # print(Follow.random_signature())
+    rule = Follow.get_rule("follow", "xigua")
+    print(type(rule))
+    print(type(json.dumps(rule)))
+    print(json.dumps(rule))
+    pass

+ 44 - 0
xigua/xigua_main/run_xigua_follow_scheduling.py

@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/17
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from xigua.xigua_follow.xigua_follow_scheduling import Follow
+from common.feishu import Feishu
+
+
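+# 调度入口:循环执行西瓜视频定向榜抓取,每轮结束后清理历史日志并休眠 60 秒;出现异常时通过飞书机器人报警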
+def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+    while True:
+        try:
+            Common.logger(log_type, crawler).info('开始抓取 西瓜视频 定向榜\n')
+            Follow.get_follow_videos(log_type, crawler, strategy, oss_endpoint, env, machine)
+            Common.del_logs(log_type, crawler)
+            Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+            time.sleep(60)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"西瓜视频异常,触发报警:{e}\n")
+            Feishu.bot(log_type, crawler, f"{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  # 日志命名格式
+    parser.add_argument('--crawler')  # 哪款爬虫
+    parser.add_argument('--strategy')  # 爬虫策略
+    parser.add_argument('--our_uid')  # 站内用户 UID(当前 main 未使用)
+    parser.add_argument('--oss_endpoint')  # OSS网关
+    parser.add_argument('--env')  # 爬虫运行环境
+    parser.add_argument('--machine')  # 爬虫运行机器
+    args = parser.parse_args()  # 解析命令行参数,也可以通过终端赋值
+    # print(args)
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         strategy=args.strategy,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env,
+         machine=args.machine)
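+
+# 运行示例(假设在仓库根目录下执行;参数值仅作演示,请按实际环境调整):
+# python3 xigua/xigua_main/run_xigua_follow_scheduling.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local"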