wangkun 2 years ago
parent
commit
831b5ba32c

+ 27 - 10
README.MD

@@ -16,22 +16,39 @@ ${machine}:         爬虫运行机器,阿里云服务器: aliyun_hk / aliyun
 ${nohup_dir}:       nohup日志存储路径,如: ./youtube/nohup.log
 ```
 
-### 已上线爬虫运行命令示例
+#### YouTube
 ```
-西瓜视频运行命令: 
-阿里云 102 服务器
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
-本机
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
-杀进程命令:
-ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
-
-youtube定向榜运行命令: 
 sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="follow" --crawler="youtube" --strategy="定向爬虫策略" --oss_endpoint="hk" --env="prod" --machine="aliyun_hk" youtube/nohup.log
 youtube杀进程命令: 
+ps aux | grep run_youtube
 ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
+```
 
+#### 微信指数
+```
 微信指数杀进程
+ps aux | grep run_weixinzhishu
 ps aux | grep run_weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+#### 西瓜视频
+```
+阿里云 102 服务器
+sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
+本机
+sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
+杀进程命令:
+ps aux | grep run_xigua
+ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
+```
 
+#### 快手
 ```
+阿里云 102 服务器
+sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/nohup.log
+本机
+sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local" kuaishou/nohup.log
+杀进程命令:
+ps aux | grep run_kuaishou
+ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
+```
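
Each platform follows the same kill pattern: the plain `ps aux | grep run_xxx` is a dry run that lists the matching crawler processes, while the piped variant excludes the grep process itself (`grep -v grep`), extracts the PID with `awk '{print $2}'`, and force-terminates it with `xargs kill -9`.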

+ 0 - 1
common/users.py

@@ -60,7 +60,6 @@ class Users:
         补全飞书用户表信息,并返回
         :param log_type: 日志
         :param crawler: 哪款爬虫
-        :param sheetid: 飞书表
         :param out_user_dict: 站外用户信息字典
         :param env: 正式环境:prod,测试环境:dev
         :param machine: 部署机器,阿里云填写 aliyun,aliyun_hk ,线下分别填写 macpro,macair,local

+ 25 - 2
kuaishou/kuaishou_follow/insert_videos.py

@@ -3,7 +3,10 @@
 # @Time: 2023/2/27
 import json
 import os
+import random
+import string
 import sys
+import time
 
 sys.path.append(os.getcwd())
 from common.common import Common
@@ -23,7 +26,7 @@ class Insert:
                 if kuaishou_sheet[i][5] is None:
                     continue
                 if kuaishou_sheet[i][9] is None:
-                    video_id = 0
+                    video_id = int(time.time())
                 else:
                     video_id = kuaishou_sheet[i][9].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace("/info", "")
                 if video_id == "None":
@@ -38,7 +41,7 @@ class Insert:
                 cover_url = str(kuaishou_sheet[i][20])
                 video_url = str(kuaishou_sheet[i][21])
                 duration = int(kuaishou_sheet[i][14])
-                publish_time = str(kuaishou_sheet[i][16].replace("/", "-"))
+                publish_time = str(kuaishou_sheet[i][16]).replace("/", "-")
                 play_cnt = int(kuaishou_sheet[i][10])
                 like_cnt = int(kuaishou_sheet[i][12])
                 share_cnt = int(kuaishou_sheet[i][13])
@@ -151,8 +154,28 @@ class Insert:
         sheet = Feishu.get_values_batch("insert", "kuaishou", "fYdA8F")
         print(sheet)
 
+    @classmethod
+    def random_out_uid(cls):
+        # sample did / userId values kept for reference; both are overwritten
+        # by the random values generated below
+        did = "web_e2901e1c5a13c60af81ba88bc7a3ee24"
+        userId = "1921947321"  # alternative sample: "3352428474"
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        # 10位随机数的方法
+        userId = ''.join(str(random.choice(range(1, 10))) for _ in range(10))
+        print(type(userId))
+        print(userId)
+        # 生成5位随机字符,包括大小写字母和数字
+        a_str = ''.join(random.sample(string.ascii_letters + string.digits, 5))
+        out_uid = ''.join(random.sample(string.digits, 10))
+        print(type(out_uid))
+        print(out_uid)
+
 
 if __name__ == "__main__":
     Insert.insert_video_from_feishu_to_mysql("insert-prod", "kuaishou", "prod", "local")
     # Insert.get_sheet()
+    # Insert.random_out_uid()
     pass
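
For reference, a standalone sketch of the id generation that `random_out_uid` experiments with above; the helper names are illustrative and not part of the repository:

```
import random
import string


def fake_out_uid() -> str:
    # 10 random digits drawn from 1-9, matching the expression used above
    return ''.join(str(random.choice(range(1, 10))) for _ in range(10))


def short_token(length: int = 5) -> str:
    # random.sample draws without replacement, so characters never repeat
    return ''.join(random.sample(string.ascii_letters + string.digits, length))


print(fake_out_uid())  # e.g. 4829175364
print(short_token())   # e.g. aB3xZ
```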

+ 570 - 625
kuaishou/kuaishou_follow/kuaishou_follow.py

@@ -3,700 +3,645 @@
 # @Time: 2023/2/24
 import os
 import random
+import shutil
 import sys
 import time
 import requests
-import urllib3
+import json
+
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
+from common.users import Users
+from common.db import MysqlHelper
 from common.publish import Publish
-proxies = {"http": None, "https": None}
 
 
 class Follow:
-    # 已抓取视频数量
-    get_person_video_count = []
-    get_all_video_count = []
-    # 小程序:关注列表翻页参数
-    follow_pcursor = ""
-    # 小程序:个人主页视频列表翻页参数
-    person_pcursor = ""
-    # 视频发布时间
-    send_time = 0
-    # 配置微信
-    wechat_sheet = Feishu.get_values_batch("follow", "kuaishou", "WFF4jw")
-    Referer = wechat_sheet[2][3]
-    NS_sig3 = wechat_sheet[3][3]
-    NS_sig3_origin = wechat_sheet[4][3]
-    did = wechat_sheet[5][3]
-    session_key = wechat_sheet[6][3]
-    unionid = wechat_sheet[7][3]
-    eUserStableOpenId = wechat_sheet[8][3]
-    openId = wechat_sheet[9][3]
-    eOpenUserId = wechat_sheet[10][3]
-    kuaishou_wechat_app_st = wechat_sheet[11][3]
-    passToken = wechat_sheet[12][3]
-    userId = wechat_sheet[13][3]
-
-    # 过滤敏感词
+    platform = "快手"
+    tag = "快手爬虫,定向爬虫策略"
+
     @classmethod
-    def sensitive_words(cls):
-        # 敏感词库列表
-        word_list = []
-        # 从云文档读取所有敏感词,添加到词库列表
-        lists = Feishu.get_values_batch("follow", "kuaishou", "HIKVvs")
-        for i in lists:
-            for j in i:
-                # 过滤空的单元格内容
-                if j is None:
-                    pass
-                else:
-                    word_list.append(j)
-        return word_list
-
-    # 下载规则
-    @staticmethod
-    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
-        """
-        下载视频的基本规则
-        :param d_duration: 时长
-        :param d_width: 宽
-        :param d_height: 高
-        :param d_play_cnt: 播放量
-        :param d_like_cnt: 点赞量
-        :param d_share_cnt: 分享量
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        if int(float(d_duration)) >= 40:
-            if int(d_width) >= 0 or int(d_height) >= 0:
-                if int(d_play_cnt) >= 5000:
-                    if int(d_like_cnt) >= 5000 or int(d_share_cnt) >= 1000:
-                        return True
-                    else:
-                        return False
-                else:
-                    return False
-            else:
-                return False
+    def get_rule(cls, log_type, crawler, index):
+        try:
+            while True:
+                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
+                if rule_sheet is None:
+                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
+                    time.sleep(10)
+                    continue
+                if index == 1:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
+                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+                elif index == 2:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
+                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
+                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
+                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
+                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
+                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
+                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    @classmethod
+    def download_rule(cls, video_dict, rule_dict):
+        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True\
+                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True\
+                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True\
+                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True\
+                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True\
+                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
+            return True
         else:
             return False
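
`get_rule` returns each threshold as an operator-plus-number string (the operator and the number appear to sit in adjacent sheet cells), and `download_rule` concatenates the video metric with that string and hands the result to `eval`. A minimal sketch with made-up values:

```
# made-up values illustrating the eval-based comparison in download_rule
video_dict = {"play_cnt": 12000, "like_cnt": 300}
rule_dict = {"play_cnt": ">=5000", "like_cnt": ">=5000"}

for key in ("play_cnt", "like_cnt"):
    expr = f"{video_dict[key]}{rule_dict[key]}"  # e.g. "12000>=5000"
    print(key, expr, eval(expr))                 # play_cnt 12000>=5000 True
```

Because the rule text is evaluated verbatim, the Feishu sheet content is effectively trusted input.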
 
-    # 删除飞书关注人列表
+    # 过滤词库
     @classmethod
-    def del_follow_user_from_feishu(cls, log_type):
+    def filter_words(cls, log_type, crawler):
         try:
             while True:
-                follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
-                if len(follow_sheet) == 1:
-                    Common.logger(log_type).info('删除完成\n')
-                    return
-                else:
-                    for i in range(1, len(follow_sheet)):
-                        Feishu.dimension_range(log_type, "kuaishou", "2OLxLr", 'ROWS', i+1, i+1)
-                        time.sleep(0.5)
-                        break
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
+    # 万能标题
+    @classmethod
+    def random_title(cls, log_type, crawler):
+        try:
+            while True:
+                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
+                if random_title_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                random_title_list = []
+                for x in random_title_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            random_title_list.append(y)
+                return random.choice(random_title_list)
         except Exception as e:
-            Common.logger(log_type).error('del_follow_user_from_feishu异常:{}', e)
+            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
 
-    # 从小程序中,关注用户列表同步至云文档
+    # 获取站外用户信息
     @classmethod
-    def get_follow_users_to_feishu(cls, log_type):
+    def get_out_user_info(cls, log_type, crawler, out_uid):
         try:
-            follow_list = []
-            follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
-            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/relation/fol?"
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfile",
+                "variables": {
+                    "userId": out_uid
+                },
+                "query": "query visionProfile($userId: String) {\n  visionProfile(userId: $userId) {\n    result\n    hostName\n    userProfile {\n      ownerCount {\n        fan\n        photo\n        follow\n        photo_public\n        __typename\n      }\n      profile {\n        gender\n        user_name\n        user_id\n        headurl\n        user_text\n        user_profile_bg_url\n        __typename\n      }\n      isFollowing\n      __typename\n    }\n    __typename\n  }\n}\n"
+            })
             headers = {
-                "content-type": "application/json",
-                "Accept-Encoding": "gzip,compress,br,deflate",
-                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
-                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148'
-                              ' MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN',
-                "Referer": str(cls.Referer),
+                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
+                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                'content-type': 'application/json',
+                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                # 'Cache-Control': 'no-cache',
+                # 'Connection': 'keep-alive',
+                # 'Origin': 'https://www.kuaishou.com',
+                # 'Pragma': 'no-cache',
+                # 'Sec-Fetch-Dest': 'empty',
+                # 'Sec-Fetch-Mode': 'cors',
+                # 'Sec-Fetch-Site': 'same-origin',
+                # 'accept': '*/*',
+                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                # 'sec-ch-ua-mobile': '?0',
+                # 'sec-ch-ua-platform': '"macOS"'
             }
-            params = {
-                "__NS_sig3": str(cls.NS_sig3),
-                "__NS_sig3_origin": str(cls.NS_sig3_origin)
-            }
-            cookies = {
-                "did": str(cls.did),
-                "preMinaVersion": "v3.109.0",
-                "sid": "kuaishou.wechat.app",
-                "appId": "ks_wechat_small_app_2",
-                "clientid": "13",
-                "client_key": "f60ac815",
-                "kpn": "WECHAT_SMALL_APP",
-                "kpf": "OUTSIDE_ANDROID_H5",
-                "language": "zh_CN",
-                "smallAppVersion": "v3.114.0",
-                "session_key": str(cls.session_key),
-                "unionid": str(cls.unionid),
-                "eUserStableOpenId": str(cls.eUserStableOpenId),
-                "openId": str(cls.openId),
-                "eOpenUserId": str(cls.eOpenUserId),
-                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
-                "passToken": str(cls.passToken),
-                "userId": str(cls.userId)
-            }
-            json_text = {
-                "count": 20,
-                "pcursor": str(cls.follow_pcursor),
-                "ftype": 1
-            }
-            urllib3.disable_warnings()
-            r = requests.post(url=url, headers=headers, params=params,
-                              cookies=cookies, json=json_text, proxies=proxies, verify=False)
-            if "fols" not in r.json():
-                Common.logger(log_type).warning("从小程序中获取关注用户列表:{}", r.text)
+            response = requests.post(url=url, headers=headers, data=payload)
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
+                return
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
+                return
+            elif 'visionProfile' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
+                return
+            elif 'userProfile' not in response.json()['data']['visionProfile']:
+                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']['userProfile']}\n")
+                return
             else:
-                users = r.json()["fols"]
-                for i in range(len(users)):
-                    uid = users[i]["targetId"]
-                    nick = users[i]["targetName"]
-                    sex = users[i]["targetSex"]
-                    description = users[i]["targetUserText"]
-                    if "followReason" in users[i]:
-                        follow_reason = users[i]["followReason"]
+                userProfile = response.json()['data']['visionProfile']['userProfile']
+                out_user_dict = {}
+                if 'ownerCount' not in userProfile \
+                        or 'fan' not in userProfile['ownerCount'] \
+                        or 'follow' not in userProfile['ownerCount']:
+                    out_user_dict['out_fans'] = 0
+                    out_user_dict['out_follow'] = 0
+                else:
+                    out_fans_str = str(userProfile['ownerCount']['fan'])
+                    out_follow_str = str(userProfile['ownerCount']['follow'])
+                    if "万" in out_fans_str:
+                        out_user_dict['out_fans'] = int(float(out_fans_str.split("万")[0]) * 10000)
                     else:
-                        follow_reason = ""
-                    follow_time = users[i]["time"]
-                    is_friend = users[i]["isFriend"]
-                    # print(f"uid:{uid}")
-                    follow_list.append(uid)
-                    # print(f"follow_list:{follow_list}")
-                    # 同步已关注的用户至云文档
-                    if uid not in [j for i in follow_sheet for j in i]:
-                        time.sleep(1)
-                        Feishu.insert_columns(log_type, "kuaishou", "2OLxLr", "ROWS", 1, 2)
-                        time.sleep(1)
-                        values = [[uid, nick, sex, description, follow_reason, follow_time, str(is_friend)]]
-                        Feishu.update_values(log_type, "kuaishou", "2OLxLr", "A2:L2", values)
+                        out_user_dict['out_fans'] = int(out_fans_str.replace(",", ""))
+                    if "万" in out_follow_str:
+                        out_user_dict['out_follow'] = int(float(out_follow_str.split("万")[0]) * 10000)
                     else:
-                        Common.logger(log_type).info("用户:{},在云文档中已存在", nick)
-            cls.follow_pcursor = r.json()["pcursor"]
-            # 翻页,直至到底了
-            if cls.follow_pcursor != "no_more":
-                cls.get_follow_users_to_feishu(log_type)
-            else:
-                Common.logger(log_type).info("从小程序中同步关注用户至云文档完成\n")
+                        out_user_dict['out_follow'] = int(out_follow_str.replace(",", ""))
+
+                if 'profile' not in userProfile:
+                    out_user_dict['out_avatar_url'] = ''
+                elif 'headurl' not in userProfile['profile']:
+                    out_user_dict['out_avatar_url'] = ''
+                else:
+                    out_user_dict['out_avatar_url'] = userProfile['profile']['headurl']
+
+                return out_user_dict
         except Exception as e:
-            Common.logger(log_type).error("从小程序中,关注用户列表同步至云文档异常:{}\n", e)
+            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
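
The fan/follow counts returned above come back either with a 万 suffix or with thousands separators, and both branches reduce to the normalisation sketched below (sample values are made up):

```
def parse_count(text: str) -> int:
    # "1.2万" -> 12000; "3,456" -> 3456
    if "万" in text:
        return int(float(text.split("万")[0]) * 10000)
    return int(text.replace(",", ""))


assert parse_count("1.2万") == 12000
assert parse_count("3,456") == 3456
```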
 
-    # 从云文档获取关注用户列表
+    # 获取用户信息列表
     @classmethod
-    def get_follow_users(cls, log_type):
+    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
         try:
-            follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
-            if len(follow_sheet) == 1:
-                Common.logger(log_type).info("暂无关注用户")
-            else:
-                follow_dict = {}
-                for i in range(1, len(follow_sheet)):
-                    uid = follow_sheet[i][0]
-                    nick = follow_sheet[i][1]
-                    if uid is None or nick is None:
-                        pass
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                our_user_list = []
+                for i in range(1, len(user_sheet)):
+                # for i in range(1, 2):
+                    out_uid = user_sheet[i][2]
+                    user_name = user_sheet[i][3]
+                    our_uid = user_sheet[i][6]
+                    our_user_link = user_sheet[i][7]
+                    if out_uid is None or user_name is None:
+                        Common.logger(log_type, crawler).info("空行\n")
                     else:
-                        follow_dict[nick] = uid
-                return follow_dict
+                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                        if our_uid is None:
+                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                            out_user_dict = {
+                                "out_uid": out_uid,
+                                "user_name": user_name,
+                                "out_avatar_url": out_user_info["out_avatar_url"],
+                                "out_create_time": '',
+                                "out_tag": '',
+                                "out_play_cnt": 0,
+                                "out_fans": out_user_info["out_fans"],
+                                "out_follow": out_user_info["out_follow"],
+                                "out_friend": 0,
+                                "out_like": 0,
+                                "platform": cls.platform,
+                                "tag": cls.tag,
+                            }
+                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
+                                                              out_user_dict=out_user_dict, env=env, machine=machine)
+                            our_uid = our_user_dict['our_uid']
+                            our_user_link = our_user_dict['our_user_link']
+                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                                 [[our_uid, our_user_link]])
+                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                            our_user_list.append(our_user_dict)
+                        else:
+                            our_user_dict = {
+                                'out_uid': out_uid,
+                                'user_name': user_name,
+                                'our_uid': our_uid,
+                                'our_user_link': our_user_link,
+                            }
+                            our_user_list.append(our_user_dict)
+                return our_user_list
         except Exception as e:
-            Common.logger(log_type).error("从云文档获取关注用户列表异常:{}\n", e)
+            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
 
-    # 从云文档获取取消关注用户列表
+    # 处理视频标题
     @classmethod
-    def get_unfollow_users(cls, log_type):
-        try:
-            unfollow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "WRveYg")
-            if len(unfollow_sheet) == 1:
-                Common.logger(log_type).info("暂无取消关注用户")
-            else:
-                unfollow_list = []
-                nick_list = []
-                for i in range(1, len(unfollow_sheet)):
-                    uid = unfollow_sheet[i][0]
-                    nick = unfollow_sheet[i][1]
-                    nick_list.append(nick)
-                    unfollow_list.append(uid)
-                Common.logger(log_type).info("取消关注用户列表:{}", nick_list)
-                return unfollow_list
-        except Exception as e:
-            Common.logger(log_type).error("从云文档获取取消关注用户列表异常:{}", e)
+    def video_title(cls, log_type, crawler, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
 
-    # 小程序:关注/取消关注用户
-    @classmethod
-    def follow_unfollow(cls, log_type, is_follow, uid):
-        try:
-            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/relation/follow?"
-            headers = {
-                "content-type": "application/json",
-                "Accept-Encoding": "gzip,compress,br,deflate",
-                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
-                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148'
-                              ' MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN',
-                "Referer": str(cls.Referer),
-            }
-            params = {
-                "__NS_sig3": str(cls.NS_sig3),
-                "__NS_sig3_origin": str(cls.NS_sig3_origin)
-            }
-            cookies = {
-                "did": str(cls.did),
-                "preMinaVersion": "v3.109.0",
-                "sid": "kuaishou.wechat.app",
-                "appId": "ks_wechat_small_app_2",
-                "clientid": "13",
-                "client_key": "f60ac815",
-                "kpn": "WECHAT_SMALL_APP",
-                "kpf": "OUTSIDE_ANDROID_H5",
-                "language": "zh_CN",
-                "smallAppVersion": "v3.114.0",
-                "session_key": str(cls.session_key),
-                "unionid": str(cls.unionid),
-                "eUserStableOpenId": str(cls.eUserStableOpenId),
-                "openId": str(cls.openId),
-                "eOpenUserId": str(cls.eOpenUserId),
-                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
-                "passToken": str(cls.passToken),
-                "userId": str(cls.userId)
-            }
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
 
-            if is_follow == "follow":
-                ftype = 1
-            elif is_follow == "unfollow":
-                ftype = 2
-            else:
-                ftype = 1
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "")[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return cls.random_title(log_type, crawler)
+        else:
+            return video_title
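
A simplified walk-through of `video_title` on a made-up caption: hashtags and @-mentions are split off, punctuation is stripped, and the result is capped at 40 characters (empty results fall back to `random_title`):

```
raw = "养生小妙招 #健康 #养生 @快手热门"
title = raw.split(" #")[0].split("@")[0]
title = title.strip().replace("#", "").replace("@", "")[:40]
print(title)  # 养生小妙招
```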
 
-            json_text = {
-                "touid": uid,
-                "ftype": ftype,
-                "page_ref": 84
-            }
-            r = requests.post(url=url, headers=headers, cookies=cookies, params=params, json=json_text)
-            if is_follow == "follow":
-                if r.json()["result"] != 1:
-                    Common.logger(log_type).warning("{}", r.text)
-                else:
-                    Common.logger(log_type).info("关注:{}, {}", uid, r)
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
+        download_cnt_1, download_cnt_2 = 0, 0
+        pcursor = ""
+
+        while True:
+            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
+            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
+            if rule_dict_1 is None or rule_dict_2 is None:
+                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
+                time.sleep(10)
             else:
-                if r.json()["result"] != 1:
-                    Common.logger(log_type).warning("{}", r.text)
-                else:
-                    Common.logger(log_type).info("取消关注:{}, {}", uid, r)
-        except Exception as e:
-            Common.logger(log_type).error("关注/取消关注异常:{}", e)
+                break
 
-    # 获取个人主页视频
-    @classmethod
-    def get_user_videos(cls, log_type, uid):
-        try:
-            time.sleep(1)
-            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/feed/profile?"
+        while True:
+            # download_cnt rules are operator+number strings (e.g. "<=30"): strip the
+            # comparison operators before converting to int
+            if download_cnt_1 >= int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")) and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
+                return
+
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfilePhotoList",
+                "variables": {
+                    "userId": out_uid,
+                    "pcursor": pcursor,
+                    "page": "profile"
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+            })
             headers = {
-                "content-type": "application/json",
-                "Accept-Encoding": "gzip,compress,br,deflate",
-                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) '
-                              'AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
-                              'MicroMessenger/8.0.26(0x18001a34) NetType/WIFI Language/zh_CN',
-                "Referer": str(cls.Referer),
-            }
-            params = {
-                "__NS_sig3": str(cls.NS_sig3),
-                "__NS_sig3_origin": str(cls.NS_sig3_origin)
-            }
-            cookies = {
-                "did": str(cls.did),
-                "sid": "kuaishou.wechat.app",
-                "appId": "ks_wechat_small_app_2",
-                "clientid": "13",
-                "client_key": "f60ac815",
-                "kpn": "WECHAT_SMALL_APP",
-                "kpf": "OUTSIDE_IOS_H5",
-                "language": "zh_CN",
-                "smallAppVersion": "v3.131.0",
-                "mod": "iPhone(11<iPhone12%2C1>)",
-                "sys": "iOS%2014.7.1",
-                'wechatVersion': '8.0.26',
-                "brand": "iPhone",
-                "session_key": str(cls.session_key),
-                "unionid": str(cls.unionid),
-                "eUserStableOpenId": str(cls.eUserStableOpenId),
-                "openId": str(cls.openId),
-                "eOpenUserId": str(cls.eOpenUserId),
-                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
-                "passToken": str(cls.passToken),
-                "userId": str(cls.userId)
+                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
+                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                'content-type': 'application/json',
+                # 'accept': '*/*',
+                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                # 'Cache-Control': 'no-cache',
+                # 'Connection': 'keep-alive',
+                # 'Origin': 'https://www.kuaishou.com',
+                # 'Pragma': 'no-cache',
+                # 'Sec-Fetch-Dest': 'empty',
+                # 'Sec-Fetch-Mode': 'cors',
+                # 'Sec-Fetch-Site': 'same-origin',
+                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                # 'sec-ch-ua-mobile': '?0',
+                # 'sec-ch-ua-platform': '"macOS"'
             }
-            json_text = {
-                "count": 12,
-                "pcursor": str(cls.person_pcursor),
-                "eid": str(uid)
-            }
-            urllib3.disable_warnings()
-            r = requests.post(url=url, headers=headers, params=params, cookies=cookies,
-                              json=json_text, proxies=proxies, verify=False)
-            # Common.logger(log_type).info("response:{}\n\n", r.text)
-            if "feeds" not in r.json():
-                # Feishu.bot(log_type, "follow:get_videos_from_person:"+r.text)
-                Common.logger(log_type).warning("response:{}", r.text)
-            elif r.json()["feeds"] == 0:
-                Common.logger(log_type).warning("用户主页无视频\n")
+            response = requests.post(url=url, headers=headers, data=payload)
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                return
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                return
+            elif 'visionProfilePhotoList' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                return
+            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
+                return
+            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                 return
             else:
-                feeds = r.json()["feeds"]
+                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
+                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
                 for i in range(len(feeds)):
-                    # 视频标题过滤话题及处理特殊字符
-                    kuaishou_title = feeds[i]["caption"]
-                    title_split1 = kuaishou_title.split(" #")
-                    if title_split1[0] != "":
-                        title1 = title_split1[0]
-                    else:
-                        title1 = title_split1[-1]
-
-                    title_split2 = title1.split(" #")
-                    if title_split2[0] != "":
-                        title2 = title_split2[0]
-                    else:
-                        title2 = title_split2[-1]
-
-                    title_split3 = title2.split("@")
-                    if title_split3[0] != "":
-                        title3 = title_split3[0]
-                    else:
-                        title3 = title_split3[-1]
-
-                    video_title = title3.strip().replace("\n", "") \
-                        .replace("/", "").replace("快手", "").replace(" ", "") \
-                        .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
-                        .replace("#", "").replace(".", "。").replace("\\", "") \
-                        .replace(":", "").replace("*", "").replace("?", "") \
-                        .replace("?", "").replace('"', "").replace("<", "") \
-                        .replace(">", "").replace("|", "").replace("@", "")[:40]
-
-                    if "photoId" not in feeds[i]:
-                        video_id = "0"
-                    else:
-                        video_id = feeds[i]["photoId"]
-
-                    if "viewCount" not in feeds[i]:
-                        video_play_cnt = "0"
-                    else:
-                        video_play_cnt = feeds[i]["viewCount"]
-
-                    if "likeCount" not in feeds[i]:
-                        video_like_cnt = "0"
-                    else:
-                        video_like_cnt = feeds[i]["likeCount"]
+                    if 'photo' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                        break
 
-                    if "shareCount" not in feeds[i]:
-                        video_share_cnt = "0"
+                    # video_title
+                    if 'caption' not in feeds[i]['photo']:
+                        video_title = cls.random_title(log_type, crawler)
+                    elif feeds[i]['photo']['caption'].strip() == "":
+                        video_title = cls.random_title(log_type, crawler)
                     else:
-                        video_share_cnt = feeds[i]["shareCount"]
+                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
 
-                    if "commentCount" not in feeds[i]:
-                        video_comment_cnt = "0"
-                    else:
-                        video_comment_cnt = feeds[i]["commentCount"]
+                    if 'videoResource' not in feeds[i]['photo'] \
+                            and 'manifest' not in feeds[i]['photo']\
+                            and 'manifestH265' not in feeds[i]['photo']:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                        break
+                    videoResource = feeds[i]['photo']['videoResource']
 
-                    if "duration" not in feeds[i]:
-                        video_duration = "0"
-                    else:
-                        video_duration = int(int(feeds[i]["duration"]) / 1000)
+                    if 'h264' not in videoResource and 'hevc' not in videoResource:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                        break
 
-                    if "width" not in feeds[i] or "height" not in feeds[i]:
-                        video_width = "0"
-                        video_height = "0"
+                    # video_id
+                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                        video_id = videoResource['h264']['videoId']
+                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                        video_id = videoResource['hevc']['videoId']
                     else:
-                        video_width = feeds[i]["width"]
-                        video_height = feeds[i]["height"]
+                        video_id = ""
 
-                    if "timestamp" not in feeds[i]:
-                        video_send_time = "0"
+                    # play_cnt
+                    if 'viewCount' not in feeds[i]['photo']:
+                        play_cnt = 0
                     else:
-                        video_send_time = feeds[i]["timestamp"]
-                    cls.send_time = int(int(video_send_time) / 1000)
+                        play_cnt = int(feeds[i]['photo']['viewCount'])
 
-                    if "userName" not in feeds[i]:
-                        user_name = "0"
+                    # like_cnt
+                    if 'realLikeCount' not in feeds[i]['photo']:
+                        like_cnt = 0
                     else:
-                        user_name = feeds[i]["userName"].strip().replace("\n", "") \
-                            .replace("/", "").replace("快手", "").replace(" ", "") \
-                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                        like_cnt = feeds[i]['photo']['realLikeCount']
 
-                    if "userId" not in feeds[i]:
-                        user_id = "0"
+                    # publish_time
+                    if 'timestamp' not in feeds[i]['photo']:
+                        publish_time_stamp = 0
+                        publish_time_str = ''
+                        publish_time = 0
                     else:
-                        user_id = feeds[i]["userId"]
+                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp'])/1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600*24))
 
-                    if "headUrl" not in feeds[i]:
-                        head_url = "0"
+                    # duration
+                    if 'duration' not in feeds[i]['photo']:
+                        duration = 0
                     else:
-                        head_url = feeds[i]["headUrl"]
-
-                    if "webpCoverUrls" in feeds[i]:
-                        cover_url = feeds[i]["webpCoverUrls"][-1]["url"]
-                    elif "coverUrls" not in feeds[i]:
-                        cover_url = "0"
-                    elif len(feeds[i]["coverUrls"]) == 0:
-                        cover_url = "0"
+                        duration = int(int(feeds[i]['photo']['duration'])/1000)
+
+                    # video_width / video_height / video_url
+                    mapping = {}
+                    for item in ['width', 'height', 'url']:
+                        try:
+                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                        except Exception:
+                            # h264 track missing: fall back to hevc, then to ''
+                            try:
+                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                val = ''
+                        mapping[item] = val
+                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                    video_url = mapping['url']
+
+                    # cover_url
+                    if 'coverUrl' not in feeds[i]['photo']:
+                        cover_url = ""
                     else:
-                        cover_url = feeds[i]["coverUrls"][0]["url"]
-
-                    if "mainMvUrls" not in feeds[i]:
-                        video_url = "0"
-                    elif len(feeds[i]["mainMvUrls"]) == 0:
-                        video_url = "0"
-                    else:
-                        video_url = feeds[i]["mainMvUrls"][0]["url"]
-
-                    Common.logger(log_type).info("video_title:{}".format(video_title))
-                    Common.logger(log_type).info("user_name:{}".format(user_name))
-                    Common.logger(log_type).info("video_play_cnt:{}".format(video_play_cnt))
-                    Common.logger(log_type).info("video_like_cnt:{}".format(video_like_cnt))
-                    Common.logger(log_type).info("video_duration:{}秒".format(video_duration))
-                    Common.logger(log_type).info("video_send_time:{}".format(
-                        time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
-                    Common.logger(log_type).info("video_url:{}".format(video_url))
-
-                    # 过滤无效视频
-                    if video_id == "0" \
-                            or head_url == "0" \
-                            or cover_url == "0" \
-                            or video_url == "0" \
-                            or video_duration == "0" \
-                            or video_send_time == "0" \
-                            or user_name == "0" \
-                            or user_id == "0" \
-                            or video_title == "":
-                        Common.logger(log_type).info("无效视频\n")
-                    # 视频发布时间 <= 7 天
-                    elif int(time.time()) - int(int(video_send_time) / 1000) > 604800:
-                        Common.logger("follow").info("发布时间:{},超过7天\n", time.strftime(
-                            "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)))
-                        cls.person_pcursor = ""
-                        return
-                    # 判断敏感词
-                    elif cls.download_rule(video_duration, video_width, video_height, video_play_cnt,
-                                           video_like_cnt, video_share_cnt) is False:
-                        Common.logger(log_type).info("不满足下载规则\n".format(kuaishou_title))
-                    elif any(word if word in kuaishou_title else False for word in cls.sensitive_words()) is True:
-                        Common.logger(log_type).info("视频已中敏感词:{}\n".format(kuaishou_title))
-                    # 从云文档去重: 推荐榜_已下载表
-                    elif str(video_id) in [j for m in Feishu.get_values_batch(log_type, "kuaishou", "3cd128") for j in m]:
-                        Common.logger(log_type).info("该视频已下载:{}\n", video_title)
-                    # 从云文档去重: 用户主页_已下载表
-                    elif str(video_id) in [j for m in Feishu.get_values_batch(log_type, "kuaishou", "fYdA8F") for j in m]:
-                        Common.logger(log_type).info("该视频已下载:{}\n", video_title)
-                    # 从云文档去重:用户主页_feeds
-                    elif str(video_id) in [j for n in Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb") for j in n]:
-                        Common.logger(log_type).info("该视频已在feeds中:{}\n", video_title)
+                        cover_url = feeds[i]['photo']['coverUrl']
+
+                    # user_name / avatar_url
+                    try:
+                        user_name = feeds[i]['author']['name']
+                        avatar_url = feeds[i]['author']['headerUrl']
+                    except Exception:
+                        user_name = ''
+                        avatar_url = ''
+
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': 0,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': duration,
+                                  'publish_time': publish_time,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': out_uid,
+                                  'avatar_url': avatar_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"kuaishou{int(time.time())}"}
+
+                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
+                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
+
+                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                    Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
+
+                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
+                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                    Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+
+                    if video_title == "" or video_url == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        break
+                    elif rule_1 is True:
+                        if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                            download_finished = cls.download_publish(log_type=log_type,
+                                                                     crawler=crawler,
+                                                                     strategy=strategy,
+                                                                     video_dict=video_dict,
+                                                                     rule_dict=rule_dict_1,
+                                                                     our_uid=our_uid,
+                                                                     oss_endpoint=oss_endpoint,
+                                                                     env=env,
+                                                                     machine=machine)
+                            if download_finished is True:
+                                download_cnt_1 += 1
+                    elif rule_2 is True:
+                        if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                            download_finished = cls.download_publish(log_type=log_type,
+                                                                     crawler=crawler,
+                                                                     strategy=strategy,
+                                                                     video_dict=video_dict,
+                                                                     rule_dict=rule_dict_2,
+                                                                     our_uid=our_uid,
+                                                                     oss_endpoint=oss_endpoint,
+                                                                     env=env,
+                                                                     machine=machine)
+                            if download_finished is True:
+                                download_cnt_2 += 1
                     else:
-                        Feishu.insert_columns("follow", "kuaishou", "wW5cyb", "ROWS", 1, 2)
-                        # 获取当前时间
-                        get_feeds_time = int(time.time())
-                        # 工作表中写入数据
-                        values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(get_feeds_time))),
-                                   "用户主页",
-                                   str(video_id),
-                                   video_title,
-                                   video_play_cnt,
-                                   video_comment_cnt,
-                                   video_like_cnt,
-                                   video_share_cnt,
-                                   video_duration,
-                                   str(video_width) + "*" + str(video_height),
-                                   time.strftime(
-                                       "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)),
-                                   user_name,
-                                   user_id,
-                                   head_url,
-                                   cover_url,
-                                   video_url]]
-                        # 等待 1s,防止操作云文档太频繁,导致报错
-                        time.sleep(1)
-                        Feishu.update_values("follow", "kuaishou", "wW5cyb", "A2:T2", values)
-                        Common.logger("follow").info("添加视频至follow_feeds成功:{}\n", video_title)
-                        cls.get_person_video_count.append(video_id)
-
-                        # # 抓取足够多数量的视频
-                        # if len(cls.get_person_video_count) >= 1:
-                        #     Common.logger(log_type).info('已抓取{}:{}条视频\n', user_name, len(cls.get_person_video_count))
-                        #     cls.person_pcursor = ""
-                        #     cls.get_person_video_count = []
-                        #     return
-                if r.json()["pcursor"] == 'no_more':
-                    Common.logger(log_type).info('没有更多作品了\n')
+                        Common.logger(log_type, crawler).info("不满足下载规则\n")
+                if pcursor == "no_more":
+                    Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
                     return
-                elif len(cls.get_person_video_count) < 1:
-                    Common.logger(log_type).info('休眠 10-20 秒,翻页')
-                    time.sleep(random.randint(10, 20))
-                    # 翻页
-                    cls.person_pcursor = r.json()["pcursor"]
-                    cls.get_user_videos(log_type, uid)
 
-        except Exception as e:
-            Common.logger(log_type).error("get_videos_from_person异常:{}\n", e)
-
-    # 获取所有关注列表的用户视频
     @classmethod
-    def get_videos_from_follow(cls, log_type, env):
-        try:
-            user_list = cls.get_follow_users(log_type)
-            if len(user_list) == 0:
-                Common.logger(log_type).warning('用户ID列表为空\n')
-            else:
-                while True:
-                    for k, v in user_list.items():
-                        Common.logger(log_type).info('正在获取 {} 主页视频\n', k)
-                        cls.person_pcursor = ""
-                        cls.get_user_videos(log_type, str(v))
-                        cls.run_download_publish(log_type, env)
-                        if len(cls.get_all_video_count) >= 100:
-                            cls.get_all_video_count = []
-                            Common.logger(log_type).info('今日已抓取{}条视频\n', len(cls.get_all_video_count))
-                            return
-                        else:
-                            Common.logger(log_type).info('随机休眠 10-30 秒\n')
-                            time.sleep(random.randint(10, 30))
-        except Exception as e:
-            Common.logger(log_type).error('get_videos_from_follow异常:{}\n', e)
+    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
 
-    # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, env):
-        try:
-            follow_feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
-            for i in range(1, len(follow_feeds_sheet)):
-                time.sleep(1)
-                download_video_id = follow_feeds_sheet[i][2]
-                download_video_title = follow_feeds_sheet[i][3]
-                download_video_play_cnt = follow_feeds_sheet[i][4]
-                download_video_comment_cnt = follow_feeds_sheet[i][5]
-                download_video_like_cnt = follow_feeds_sheet[i][6]
-                download_video_share_cnt = follow_feeds_sheet[i][7]
-                download_video_duration = follow_feeds_sheet[i][8]
-                download_video_resolution = follow_feeds_sheet[i][9]
-                download_video_send_time = follow_feeds_sheet[i][10]
-                download_user_name = follow_feeds_sheet[i][11]
-                download_user_id = follow_feeds_sheet[i][12]
-                download_head_url = follow_feeds_sheet[i][13]
-                download_cover_url = follow_feeds_sheet[i][14]
-                download_video_url = follow_feeds_sheet[i][15]
-
-                Common.logger(log_type).info("正在判断第{}行,视频:{}", i + 1, download_video_title)
-
-                # 过滤空行及空标题视频
-                if download_video_id is None \
-                        or download_video_id == "" \
-                        or download_video_title is None \
-                        or download_video_title == "":
-                    # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
-                    Common.logger(log_type).warning("标题为空或空行,删除成功\n")
-                    return
-
-                # 从已下载视频表中去重:推荐榜_已下载表
-                elif str(download_video_id) in [j for m in Feishu.get_values_batch(
-                        log_type, "kuaishou", "3cd128") for j in m]:
-                    # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
-                    Common.logger(log_type).info("视频已下载:{},删除成功\n", download_video_title)
-                    return
-
-                # 从已下载视频表中去重:用户主页_已下载表
-                elif str(download_video_id) in [j for m in Feishu.get_values_batch(
-                        log_type, "kuaishou", "fYdA8F") for j in m]:
-                    # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
-                    Common.logger(log_type).info("视频已下载:{},删除成功\n", download_video_title)
-                    return
-
-                else:
-                    # 下载封面
-                    Common.download_method(log_type=log_type, text="cover",
-                                           d_name=str(download_video_title), d_url=str(download_cover_url))
-                    # 下载视频
-                    Common.download_method(log_type=log_type, text="video",
-                                           d_name=str(download_video_title), d_url=str(download_video_url))
-                    # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-                    with open("./videos/" + download_video_title + "/" + "info.txt",
-                              "a", encoding="UTF-8") as f_a:
-                        f_a.write(str(download_video_id) + "\n" +
-                                  str(download_video_title) + "\n" +
-                                  str(download_video_duration) + "\n" +
-                                  str(download_video_play_cnt) + "\n" +
-                                  str(download_video_comment_cnt) + "\n" +
-                                  str(download_video_like_cnt) + "\n" +
-                                  str(download_video_share_cnt) + "\n" +
-                                  str(download_video_resolution) + "\n" +
-                                  str(int(time.mktime(
-                                      time.strptime(download_video_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
-                                  str(download_user_name) + "\n" +
-                                  str(download_head_url) + "\n" +
-                                  str(download_video_url) + "\n" +
-                                  str(download_cover_url) + "\n" +
-                                  "kuaishou_person")
-                    Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
-
-                    # 上传视频
-                    Common.logger(log_type).info("开始上传视频:{}".format(download_video_title))
-                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
-                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
-                    Common.logger(log_type).info("视频上传完成:{}", download_video_title)
-
-                    # 视频ID工作表,插入首行
-                    time.sleep(1)
-                    Feishu.insert_columns(log_type, "kuaishou", "fYdA8F", "ROWS", 1, 2)
-                    # 视频ID工作表,首行写入数据
-                    upload_time = int(time.time())
-                    values = [[our_video_id,
-                               time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
-                               "用户主页",
-                               str(download_video_id),
-                               str(download_video_title),
-                               our_video_link,
-                               download_video_play_cnt,
-                               download_video_comment_cnt,
-                               download_video_like_cnt,
-                               download_video_share_cnt,
-                               download_video_duration,
-                               str(download_video_resolution),
-                               str(download_video_send_time),
-                               str(download_user_name),
-                               str(download_user_id),
-                               str(download_head_url),
-                               str(download_cover_url),
-                               str(download_video_url)]]
-                    time.sleep(1)
-                    Feishu.update_values(log_type, "kuaishou", "fYdA8F", "E2:Z2", values)
-                    cls.get_all_video_count.append(download_video_id)
-                    Common.logger(log_type).info("保存视频ID至已下载云文档成功:{}", download_video_title)
-
-                    # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
-                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title)
-                    return
-        except Exception as e:
-            Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", 2, 2)
-            Common.logger(log_type).error("download_publish异常,删除成功:{}\n", e)
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+        download_finished = False
+        if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+            Common.logger(log_type, crawler).info('标题已中过滤词\n')
+        else:
+            # 下载封面
+            Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+            # 下载视频
+            Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+            # 保存视频信息至txt
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # 上传视频
+            Common.logger(log_type, crawler).info("开始上传视频...")
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            if env == 'dev':
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            else:
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            Common.logger(log_type, crawler).info("视频上传完成")
+
+            if our_video_id is None:
+                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                return download_finished
+
+            # 视频信息保存数据库
+            insert_sql = f""" insert into crawler_video(video_id,
+                                                    user_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    {our_uid},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+            # 视频写入飞书
+            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
+            upload_time = int(time.time())
+            values = [[our_uid,
+                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "定向榜",
+                       str(video_dict['video_id']),
+                       video_dict['video_title'],
+                       our_video_link,
+                       video_dict['play_cnt'],
+                       video_dict['comment_cnt'],
+                       video_dict['like_cnt'],
+                       video_dict['share_cnt'],
+                       video_dict['duration'],
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       video_dict['publish_time_str'],
+                       video_dict['user_name'],
+                       video_dict['user_id'],
+                       video_dict['avatar_url'],
+                       video_dict['cover_url'],
+                       video_dict['video_url']]]
+            time.sleep(1)
+            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
+            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+            download_finished = True
+        return download_finished
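
The dedup query in repeat_video and the insert_sql in download_publish above build SQL by f-string interpolation, so a title or rule JSON containing quotes can break the statement. Below is a minimal parameterized sketch, assuming the underlying driver is pymysql; MysqlHelper's internals are not shown in this commit, so the connection handling and function names here are illustrative only.

```
# Hedged sketch: parameterized versions of the dedup select and the insert.
# Assumes pymysql is the driver; a ready `conn` connection is passed in.
import json
import pymysql


def repeat_video_count(conn, platform, out_video_id):
    """Return how many crawler_video rows already exist for this out_video_id."""
    with conn.cursor() as cursor:
        cursor.execute(
            "select count(*) from crawler_video where platform=%s and out_video_id=%s",
            (platform, out_video_id),
        )
        return cursor.fetchone()[0]


def insert_crawler_video(conn, our_video_id, our_uid, video_dict, rule_dict, platform):
    """Insert one crawled video row, letting the driver escape every value."""
    sql = (
        "insert into crawler_video(video_id, user_id, out_user_id, platform, strategy, "
        "out_video_id, video_title, cover_url, video_url, duration, publish_time, "
        "play_cnt, crawler_rule, width, height) "
        "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    )
    with conn.cursor() as cursor:
        cursor.execute(sql, (
            our_video_id,
            our_uid,
            video_dict["user_id"],
            platform,
            "定向爬虫策略",
            video_dict["video_id"],
            video_dict["video_title"],
            video_dict["cover_url"],
            video_dict["video_url"],
            int(video_dict["duration"]),
            video_dict["publish_time_str"],
            int(video_dict["play_cnt"]),
            json.dumps(rule_dict),
            int(video_dict["video_width"]),
            int(video_dict["video_height"]),
        ))
    conn.commit()
```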
 
-    # 执行下载/上传
     @classmethod
-    def run_download_publish(cls, log_type, env):
-        try:
-            while True:
-                follow_feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
-                if len(follow_feeds_sheet) == 1:
-                    Common.logger(log_type).info("下载/上传完成\n")
-                    break
-                else:
-                    cls.download_publish(log_type, env)
-        except Exception as e:
-            Common.logger(log_type).error("run_download_publish异常:{}\n", e)
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
+        for user in user_list:
+            out_uid = user["out_uid"]
+            user_name = user["user_name"]
+            our_uid = user["our_uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              our_uid=our_uid,
+                              out_uid=out_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env,
+                              machine=machine)
+            time.sleep(3)
 
 
 if __name__ == "__main__":
-    Follow.get_user_videos('follow', '240529022')
+    # print(Follow.filter_words("follow", "kuaishou"))
+    # print(Follow.random_title("follow", "kuaishou"))
+    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
+    Follow.get_videoList(log_type="follow",
+                         crawler="kuaishou",
+                         strategy="定向爬虫策略",
+                         our_uid="6282431",
+                         out_uid="3xws7ydsnmp5mgq",
+                         oss_endpoint="out",
+                         env="dev",
+                         machine="local")
+    # Follow.get_rule("follow", "kuaishou", 1)
+    # Follow.get_rule("follow", "kuaishou", 2)
 
     pass
-
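
The per-field rule checks logged above (and download_rule itself) decide by eval-ing strings such as "1000>=100" that are pieced together from the rule sheet. Below is a small sketch of the same comparison without eval, under the assumption that every rule cell is an operator followed by an integer; the helper names are illustrative, not part of this commit.

```
# Hedged sketch: evaluate a rule string like ">=100" against a numeric value
# without eval(). The field list mirrors the rule_dict keys used above.
import operator
import re

_OPS = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
        "<": operator.lt, "==": operator.eq, "=": operator.eq}


def check_rule(value, rule_str):
    """Return True when `value` satisfies a rule string such as '>=100'."""
    match = re.match(r"\s*(>=|<=|==|>|<|=)\s*(\d+)\s*$", str(rule_str))
    if match is None:
        return False  # unrecognised rule text: treat as not satisfied
    op, threshold = match.group(1), int(match.group(2))
    return _OPS[op](int(value), threshold)


def download_rule(video_dict, rule_dict):
    """Field-by-field version of the class method, without eval()."""
    fields = ["play_cnt", "video_width", "video_height",
              "like_cnt", "duration", "publish_time"]
    return all(check_rule(video_dict[field], rule_dict[field]) for field in fields)
```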

+ 0 - 641
kuaishou/kuaishou_follow/kuaishou_follow_pc.py

@@ -1,641 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/24
-import os
-import random
-import shutil
-import sys
-import time
-import requests
-import json
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.feishu import Feishu
-from common.users import Users
-from common.db import MysqlHelper
-from common.publish import Publish
-
-
-class Follow:
-    platform = "快手"
-    tag = "快手爬虫,定向爬虫策略"
-
-    @classmethod
-    def get_rule(cls, log_type, crawler, index):
-        try:
-            while True:
-                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
-                if rule_sheet is None:
-                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
-                    time.sleep(10)
-                    continue
-                if index == 1:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
-                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
-                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
-                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
-                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
-                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
-                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
-                elif index == 2:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
-                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
-                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
-                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
-                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
-                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
-                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
-
-    @classmethod
-    def download_rule(cls, video_dict, rule_dict):
-        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True\
-                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True\
-                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True\
-                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True\
-                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True\
-                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
-            return True
-        else:
-            return False
-
-    # 过滤词库
-    @classmethod
-    def filter_words(cls, log_type, crawler):
-        try:
-            while True:
-                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
-                if filter_words_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
-                    continue
-                filter_words_list = []
-                for x in filter_words_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            filter_words_list.append(y)
-                return filter_words_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
-
-    # 万能标题
-    @classmethod
-    def random_title(cls, log_type, crawler):
-        try:
-            while True:
-                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
-                if random_title_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{random_title_sheet} 10秒钟后重试")
-                    continue
-                random_title_list = []
-                for x in random_title_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            random_title_list.append(y)
-                return random.choice(random_title_list)
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
-
-    # 获取站外用户信息
-    @classmethod
-    def get_out_user_info(cls, log_type, crawler, out_uid):
-        try:
-            url = "https://www.kuaishou.com/graphql"
-            payload = json.dumps({
-                "operationName": "visionProfile",
-                "variables": {
-                    "userId": out_uid
-                },
-                "query": "query visionProfile($userId: String) {\n  visionProfile(userId: $userId) {\n    result\n    hostName\n    userProfile {\n      ownerCount {\n        fan\n        photo\n        follow\n        photo_public\n        __typename\n      }\n      profile {\n        gender\n        user_name\n        user_id\n        headurl\n        user_text\n        user_profile_bg_url\n        __typename\n      }\n      isFollowing\n      __typename\n    }\n    __typename\n  }\n}\n"
-            })
-            headers = {
-                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1921947321; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
-                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                'content-type': 'application/json',
-                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                # 'Cache-Control': 'no-cache',
-                # 'Connection': 'keep-alive',
-                # 'Origin': 'https://www.kuaishou.com',
-                # 'Pragma': 'no-cache',
-                # 'Sec-Fetch-Dest': 'empty',
-                # 'Sec-Fetch-Mode': 'cors',
-                # 'Sec-Fetch-Site': 'same-origin',
-                # 'accept': '*/*',
-                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                # 'sec-ch-ua-mobile': '?0',
-                # 'sec-ch-ua-platform': '"macOS"'
-            }
-            response = requests.post(url=url, headers=headers, data=payload)
-            if response.status_code != 200:
-                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
-                return
-            elif 'data' not in response.json():
-                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
-                return
-            elif 'visionProfile' not in response.json()['data']:
-                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
-                return
-            elif 'userProfile' not in response.json()['data']['visionProfile']:
-                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']['userProfile']}\n")
-                return
-            else:
-                userProfile = response.json()['data']['visionProfile']['userProfile']
-                out_user_dict = {}
-                if 'ownerCount' not in userProfile:
-                    out_user_dict['out_fans'] = 0
-                    out_user_dict['out_fans'] = 0
-                elif 'fan' not in userProfile['ownerCount']:
-                    out_user_dict['out_fans'] = 0
-                elif 'follow' not in userProfile['ownerCount']:
-                    out_user_dict['out_fans'] = 0
-                else:
-                    out_fans_str = str(userProfile['ownerCount']['fan'])
-                    out_follow_str = str(userProfile['ownerCount']['follow'])
-                    if "万" in out_fans_str:
-                        out_user_dict['out_fans'] = int(float(out_fans_str.split("万")[0]) * 10000)
-                    else:
-                        out_user_dict['out_fans'] = int(out_fans_str.replace(",", ""))
-                    if "万" in out_follow_str:
-                        out_user_dict['out_follow'] = int(float(out_follow_str.split("万")[0]) * 10000)
-                    else:
-                        out_user_dict['out_follow'] = int(out_follow_str.replace(",", ""))
-
-                if 'profile' not in userProfile:
-                    out_user_dict['out_avatar_url'] = ''
-                elif 'headurl' not in userProfile['profile']:
-                    out_user_dict['out_avatar_url'] = ''
-                else:
-                    out_user_dict['out_avatar_url'] = userProfile['profile']['headurl']
-
-                return out_user_dict
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
-
-    # 获取用户信息列表
-    @classmethod
-    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
-        try:
-            while True:
-                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-                if user_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
-                    continue
-                our_user_list = []
-                # for i in range(1, len(user_sheet)):
-                for i in range(1, 2):
-                    out_uid = user_sheet[i][2]
-                    user_name = user_sheet[i][3]
-                    our_uid = user_sheet[i][6]
-                    our_user_link = user_sheet[i][7]
-                    if out_uid is None or user_name is None:
-                        Common.logger(log_type, crawler).info("空行\n")
-                    else:
-                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
-                        if our_uid is None:
-                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
-                            out_user_dict = {
-                                "out_uid": out_uid,
-                                "user_name": user_name,
-                                "out_avatar_url": out_user_info["out_avatar_url"],
-                                "out_create_time": '',
-                                "out_tag": '',
-                                "out_play_cnt": 0,
-                                "out_fans": out_user_info["out_fans"],
-                                "out_follow": out_user_info["out_follow"],
-                                "out_friend": 0,
-                                "out_like": 0,
-                                "platform": cls.platform,
-                                "tag": cls.tag,
-                            }
-                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
-                                                              out_user_dict=out_user_dict, env=env, machine=machine)
-                            our_uid = our_user_dict['our_uid']
-                            our_user_link = our_user_dict['our_user_link']
-                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                                 [[our_uid, our_user_link]])
-                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
-                            our_user_list.append(our_user_dict)
-                        else:
-                            our_user_dict = {
-                                'out_uid': out_uid,
-                                'user_name': user_name,
-                                'our_uid': our_uid,
-                                'our_user_link': our_user_link,
-                            }
-                            our_user_list.append(our_user_dict)
-                return our_user_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
-
-    # 处理视频标题
-    @classmethod
-    def video_title(cls, log_type, crawler, title):
-        title_split1 = title.split(" #")
-        if title_split1[0] != "":
-            title1 = title_split1[0]
-        else:
-            title1 = title_split1[-1]
-
-        title_split2 = title1.split(" #")
-        if title_split2[0] != "":
-            title2 = title_split2[0]
-        else:
-            title2 = title_split2[-1]
-
-        title_split3 = title2.split("@")
-        if title_split3[0] != "":
-            title3 = title_split3[0]
-        else:
-            title3 = title_split3[-1]
-
-        video_title = title3.strip().replace("\n", "") \
-                          .replace("/", "").replace("快手", "").replace(" ", "") \
-                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
-                          .replace("#", "").replace(".", "。").replace("\\", "") \
-                          .replace(":", "").replace("*", "").replace("?", "") \
-                          .replace("?", "").replace('"', "").replace("<", "") \
-                          .replace(">", "").replace("|", "").replace("@", "")[:40]
-        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
-            return cls.random_title(log_type, crawler)
-        else:
-            return video_title
-
-    @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        download_cnt_1, download_cnt_2 = 0, 0
-        pcursor = ""
-
-        while True:
-            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
-            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
-            if rule_dict_1 is None or rule_dict_2 is None:
-                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
-                time.sleep(10)
-            else:
-                break
-
-        while True:
-            if download_cnt_1 >=int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]) and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
-                return
-            url = "https://www.kuaishou.com/graphql"
-            payload = json.dumps({
-                "operationName": "visionProfilePhotoList",
-                "variables": {
-                    "userId": out_uid,
-                    "pcursor": pcursor,
-                    "page": "profile"
-                },
-                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
-            })
-            headers = {
-                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1268646616; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
-                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                'content-type': 'application/json',
-                # 'accept': '*/*',
-                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                # 'Cache-Control': 'no-cache',
-                # 'Connection': 'keep-alive',
-                # 'Origin': 'https://www.kuaishou.com',
-                # 'Pragma': 'no-cache',
-                # 'Sec-Fetch-Dest': 'empty',
-                # 'Sec-Fetch-Mode': 'cors',
-                # 'Sec-Fetch-Site': 'same-origin',
-                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                # 'sec-ch-ua-mobile': '?0',
-                # 'sec-ch-ua-platform': '"macOS"'
-            }
-            response = requests.post(url=url, headers=headers, data=payload)
-            if response.status_code != 200:
-                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
-                return
-            elif 'data' not in response.json():
-                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
-                return
-            elif 'visionProfilePhotoList' not in response.json()['data']:
-                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
-                return
-            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
-                return
-            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
-                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-                return
-            else:
-                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
-                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
-                for i in range(len(feeds)):
-                    if 'photo' not in feeds[i]:
-                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
-                        break
-
-                    # video_title
-                    if 'caption' not in feeds[i]['photo']:
-                        video_title = cls.random_title(log_type, crawler)
-                    elif feeds[i]['photo']['caption'].strip() == "":
-                        video_title = cls.random_title(log_type, crawler)
-                    else:
-                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
-
-                    if 'videoResource' not in feeds[i]['photo'] \
-                            and 'manifest' not in feeds[i]['photo']\
-                            and 'manifestH265'not in feeds[i]['photo']:
-                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                        break
-                    videoResource = feeds[i]['photo']['videoResource']
-
-                    if 'h264' not in videoResource and 'hevc' not in videoResource:
-                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                        break
-
-                    # video_id
-                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                        video_id = videoResource['h264']['videoId']
-                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                        video_id = videoResource['hevc']['videoId']
-                    else:
-                        video_id = ""
-
-                    # play_cnt
-                    if 'viewCount' not in feeds[i]['photo']:
-                        play_cnt = 0
-                    else:
-                        play_cnt = int(feeds[i]['photo']['viewCount'])
-
-                    # like_cnt
-                    if 'realLikeCount' not in feeds[i]['photo']:
-                        like_cnt = 0
-                    else:
-                        like_cnt = feeds[i]['photo']['realLikeCount']
-
-                    # publish_time
-                    if 'timestamp' not in feeds[i]['photo']:
-                        publish_time_stamp = 0
-                        publish_time_str = ''
-                        publish_time = 0
-                    else:
-                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp'])/1000)
-                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600*24))
-
-                    # duration
-                    if 'duration' not in feeds[i]['photo']:
-                        duration = 0
-                    else:
-                        duration = int(int(feeds[i]['photo']['duration'])/100)
-
-                    # video_width / video_height / video_url
-                    mapping = {}
-                    for item in ['width', 'height', 'url']:
-                        try:
-                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                        except Exception:
-                            val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                        except:
-                            val = ''
-                        mapping[item] = val
-                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
-                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
-                    video_url = mapping['url']
-
-                    # cover_url
-                    if 'coverUrl' not in feeds[i]['photo']:
-                        cover_url = ""
-                    else:
-                        cover_url = feeds[i]['photo']['coverUrl']
-
-                    # user_name / avatar_url
-                    try:
-                        user_name = feeds[i]['author']['name']
-                        avatar_url = feeds[i]['author']['headerUrl']
-                    except Exception:
-                        user_name = ''
-                        avatar_url = ''
-
-                    video_dict = {'video_title': video_title,
-                                  'video_id': video_id,
-                                  'play_cnt': play_cnt,
-                                  'comment_cnt': 0,
-                                  'like_cnt': like_cnt,
-                                  'share_cnt': 0,
-                                  'video_width': video_width,
-                                  'video_height': video_height,
-                                  'duration': duration,
-                                  'publish_time': publish_time,
-                                  'publish_time_stamp': publish_time_stamp,
-                                  'publish_time_str': publish_time_str,
-                                  'user_name': user_name,
-                                  'user_id': out_uid,
-                                  'avatar_url': avatar_url,
-                                  'cover_url': cover_url,
-                                  'video_url': video_url,
-                                  'session': f"kuaishou{int(time.time())}"}
-
-                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
-                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                    Common.logger(log_type, crawler).info(f"publish_time_str:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-                    Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                    Common.logger(log_type, crawler).info(f"publish_time_str:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-                    Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
-
-                    if video_title == "" or video_url == "":
-                        Common.logger(log_type, crawler).info("无效视频\n")
-                        break
-                    elif rule_1 is True:
-                        if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                            cls.download_publish(log_type=log_type,
-                                                 crawler=crawler,
-                                                 strategy=strategy,
-                                                 video_dict=video_dict,
-                                                 rule_dict=rule_dict_1,
-                                                 our_uid=our_uid,
-                                                 oss_endpoint=oss_endpoint,
-                                                 env=env,
-                                                 machine=machine)
-                            download_cnt_1 += 1
-                    elif rule_2 is True:
-                        if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                            cls.download_publish(log_type=log_type,
-                                                 crawler=crawler,
-                                                 strategy=strategy,
-                                                 video_dict=video_dict,
-                                                 rule_dict=rule_dict_2,
-                                                 our_uid=our_uid,
-                                                 oss_endpoint=oss_endpoint,
-                                                 env=env,
-                                                 machine=machine)
-                            download_cnt_2 += 1
-                if pcursor == "no_more":
-                    Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
-                    return
-
-    @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env, machine):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
-        return len(repeat_video)
-
-    @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
-        if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
-            Common.logger(log_type, crawler).info('视频已下载\n')
-        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
-            Common.logger(log_type, crawler).info('视频已下载\n')
-        elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
-            Common.logger(log_type, crawler).info('标题已中过滤词\n')
-        else:
-            # 下载封面
-            Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
-            # 下载视频
-            Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
-            # 保存视频信息至txt
-            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-            # 上传视频
-            Common.logger(log_type, crawler).info("开始上传视频...")
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-            if env == 'dev':
-                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-            else:
-                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-            Common.logger(log_type, crawler).info("视频上传完成")
-
-            if our_video_id is None:
-                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                return
-
-            # 视频信息保存数据库
-            insert_sql = f""" insert into crawler_video(video_id,
-                                                    user_id,
-                                                    out_user_id,
-                                                    platform,
-                                                    strategy,
-                                                    out_video_id,
-                                                    video_title,
-                                                    cover_url,
-                                                    video_url,
-                                                    duration,
-                                                    publish_time,
-                                                    play_cnt,
-                                                    crawler_rule,
-                                                    width,
-                                                    height)
-                                                    values({our_video_id},
-                                                    {our_uid},
-                                                    "{video_dict['user_id']}",
-                                                    "{cls.platform}",
-                                                    "定向爬虫策略",
-                                                    "{video_dict['video_id']}",
-                                                    "{video_dict['video_title']}",
-                                                    "{video_dict['cover_url']}",
-                                                    "{video_dict['video_url']}",
-                                                    {int(video_dict['duration'])},
-                                                    "{video_dict['publish_time_str']}",
-                                                    {int(video_dict['play_cnt'])},
-                                                    '{json.dumps(rule_dict)}',
-                                                    {int(video_dict['video_width'])},
-                                                    {int(video_dict['video_height'])}) """
-            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-
-            # 视频写入飞书
-            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
-            upload_time = int(time.time())
-            values = [[our_uid,
-                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                       "定向榜",
-                       str(video_dict['video_id']),
-                       video_dict['video_title'],
-                       our_video_link,
-                       video_dict['play_cnt'],
-                       video_dict['comment_cnt'],
-                       video_dict['like_cnt'],
-                       video_dict['share_cnt'],
-                       video_dict['duration'],
-                       f"{video_dict['video_width']}*{video_dict['video_height']}",
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['video_url']]]
-            time.sleep(1)
-            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
-            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
-
-    @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
-        for user in user_list:
-            out_uid = user["out_uid"]
-            user_name = user["user_name"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              strategy=strategy,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env,
-                              machine=machine)
-            cls.pcursor = ""
-            cls.download_cnt = 0
-            time.sleep(3)
-
-
-if __name__ == "__main__":
-    # print(Follow.filter_words("follow", "kuaishou"))
-    # print(Follow.random_title("follow", "kuaishou"))
-    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
-    Follow.get_videoList(log_type="follow",
-                         crawler="kuaishou",
-                         strategy="定向爬虫策略",
-                         our_uid="6282431",
-                         out_uid="3xws7ydsnmp5mgq",
-                         oss_endpoint="out",
-                         env="dev",
-                         machine="local")
-    # Follow.get_rule("follow", "kuaishou", 1)
-    # Follow.get_rule("follow", "kuaishou", 2)
-
-    pass

+ 49 - 0
kuaishou/kuaishou_main/run_kuaishou_follow.py

@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/27
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from kuaishou.kuaishou_follow.kuaishou_follow import Follow
+
+
+def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+    while True:
+        try:
+            Common.logger(log_type, crawler).info('开始抓取 快手 定向榜\n')
+            Follow.get_follow_videos(log_type=log_type,
+                                     crawler=crawler,
+                                     strategy=strategy,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env,
+                                     machine=machine)
+            Common.del_logs(log_type, crawler)
+            Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+            time.sleep(60)
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"快手定向榜异常,触发报警:{e}\n")
+            Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 命令行参数解析器
+    parser.add_argument('--log_type', type=str)  # 日志类型
+    parser.add_argument('--crawler')  # 爬虫名称
+    parser.add_argument('--strategy')  # 抓取策略
+    parser.add_argument('--our_uid')  # 预留参数,main() 暂未使用
+    parser.add_argument('--oss_endpoint')  # OSS 节点
+    parser.add_argument('--env')  # 运行环境
+    parser.add_argument('--machine')  # 部署机器
+    args = parser.parse_args()
+    # print(args)
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         strategy=args.strategy,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env,
+         machine=args.machine)
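
The entry point above follows the repo's run-forever pattern: crawl one round, sleep 60 seconds, and send a Feishu alert whenever an exception escapes, without ever exiting. Below is a self-contained sketch of just that scheduling shell; crawl_once and send_alert are hypothetical stand-ins (the real calls are Follow.get_follow_videos and Feishu.bot), and the sleep after a failure is an extra safeguard that the committed code does not have.

```python
# Sketch of the run-forever scheduling shell; crawl_once / send_alert are
# hypothetical stand-ins, not functions from this repository.
import time
import traceback


def crawl_once() -> None:
    """Placeholder for Follow.get_follow_videos(...)."""


def send_alert(message: str) -> None:
    """Placeholder for Feishu.bot(...)."""
    print(f"[alert] {message}")


def run_forever(interval_seconds: int = 60) -> None:
    while True:
        try:
            crawl_once()
            time.sleep(interval_seconds)        # one round done, rest before the next
        except Exception:
            send_alert(traceback.format_exc())  # report the failure, keep the loop alive
            time.sleep(interval_seconds)        # assumption: back off instead of retrying hot
```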

BIN
kuaishou/logs/.DS_Store


BIN
kuaishou/videos/🧨🧨新年的炮竹声再响也没有我给大家拜年的心意响来给大家拜年啦🧧🧧/image.jpg


BIN
kuaishou/videos/🧨🧨新年的炮竹声再响也没有我给大家拜年的心意响来给大家拜年啦🧧🧧/video.mp4


BIN
weixinzhishu/logs/.DS_Store


+ 124 - 0
weixinzhishu/weixinzhishu_main/weixinzhishu_test.py

@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/28
+import json
+import os
+import sys
+import time
+from datetime import date, timedelta
+import requests
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+
+
+class Test:
+    # 获取微信 key / openid
+    @classmethod
+    def get_wechat_key(cls, log_type, crawler):
+        """
+        获取微信 key / openid
+        https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k
+        :param log_type: 日志名
+        :param crawler: 哪款爬虫,填写:weixinzhishu
+        :return: search_key, openid
+        """
+        try:
+            while True:
+                sheet = Feishu.get_values_batch(log_type, crawler, 'sVL74k')
+                if sheet is None:
+                    Common.logger(log_type, crawler).warning(f"获取热词sheet:{sheet} ,10秒钟后重试")
+                    time.sleep(10)
+                else:
+                    break
+            # 固定读取 sheet 第二行的 search_key / openid
+            search_key = sheet[1][1]
+            openid = sheet[1][2]
+            return search_key, openid
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"wechat_key:{e}\n")
+
+    @classmethod
+    def get_words(cls, log_type, crawler):
+        try:
+            while True:
+                sheet = Feishu.get_values_batch(log_type, crawler, '6dsgUk')
+                if sheet is None:
+                    Common.logger(log_type, crawler).warning(f"获取热词sheet:{sheet} ,10秒钟后重试")
+                    time.sleep(10)
+                else:
+                    break
+            word_list = []
+            for x in sheet:
+                for y in x:
+                    if y is None:
+                        pass
+                    else:
+                        word_list.append(y)
+            return word_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_words:{e}\n")
+
+    @classmethod
+    def get_score_test(cls, log_type, crawler):
+        wechat_key = cls.get_wechat_key(log_type, crawler)
+        search_key = wechat_key[0]
+        openid = wechat_key[-1]
+
+        start_ymd = (date.today() + timedelta(days=-7)).strftime("%Y%m%d")
+        end_ymd = (date.today() + timedelta(days=0)).strftime("%Y%m%d")
+
+        word_list = cls.get_words(log_type, crawler)
+        for i in range(len(word_list)):
+            url = "https://search.weixin.qq.com/cgi-bin/wxaweb/wxindex"
+            payload = json.dumps({
+                "openid": openid,
+                "search_key": search_key,
+                "cgi_name": "GetDefaultIndex",
+                "start_ymd": start_ymd,
+                "end_ymd": end_ymd,
+                "query": word_list[i]
+            })
+            headers = {
+                'Host': 'search.weixin.qq.com',
+                'content-type': 'application/json',
+                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.32(0x1800202a) NetType/WIFI Language/zh_CN',
+                'Referer': 'https://servicewechat.com/wxc026e7662ec26a3a/42/page-frame.html'
+            }
+            response = requests.request("POST", url, headers=headers, data=payload)
+            wechat_score_list = []
+            word_wechat_score_dict = {
+                "id": i+1,
+                "word": word_list[i],
+                "wechatScores": wechat_score_list,
+            }
+            if response.json()['code'] == -10000:
+                Common.logger(log_type, crawler).info(f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time())))} response:{response.json()['msg']} 休眠 10 秒,重新获取")
+                time.sleep(10)
+                return cls.get_score_test(log_type, crawler)  # 重新获取 search_key 后整体重试,并结束当前轮
+            elif response.json()['code'] == -10002:
+                Common.logger(log_type, crawler).info("该词暂未收录")
+            elif response.json()['code'] != 0:
+                Common.logger(log_type, crawler).warning(f"{word_wechat_score_dict}")
+            else:
+                time_index = response.json()['content']['resp_list'][0]['indexes'][0]['time_indexes']
+                for x in range(len(time_index)):
+                    score_time = time_index[x]['time']
+                    score_time_str = f"{str(score_time)[:4]}-{str(score_time)[4:6]}-{str(score_time)[6:]}"
+                    score = time_index[x]['score']
+                    wechat_score_dict = {"score": score, "scoreDate": score_time_str}
+                    wechat_score_list.append(wechat_score_dict)
+                    Common.logger(log_type, crawler).info(f"wechat_score_dict:{wechat_score_dict}")
+                    # 写飞书
+                    Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
+                    time.sleep(0.5)
+                    Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2",
+                                         [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                                           word_list[i],
+                                           score_time_str, score]])
+                    Common.logger(log_type, crawler).info("写入飞书成功\n")
+
+
+if __name__ == "__main__":
+    Test.get_score_test("test", "weixinzhishu")
+    pass
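
For reference, this is the response shape get_score_test relies on and how its time_indexes entries become {"score", "scoreDate"} records. The payload below is invented purely for illustration; the field names follow the parsing code above, but the values are not real API output.

```python
# Illustrative payload in the shape the parser above expects; all values are made up.
sample_response = {
    "code": 0,
    "content": {
        "resp_list": [
            {"indexes": [
                {"time_indexes": [
                    {"time": 20230221, "score": 123456},
                    {"time": 20230222, "score": 234567},
                ]}
            ]}
        ]
    },
}

wechat_score_list = []
for item in sample_response["content"]["resp_list"][0]["indexes"][0]["time_indexes"]:
    ymd = str(item["time"])                          # 20230221 -> "2023-02-21"
    score_date = f"{ymd[:4]}-{ymd[4:6]}-{ymd[6:]}"
    wechat_score_list.append({"score": item["score"], "scoreDate": score_date})

print(wechat_score_list)
# [{'score': 123456, 'scoreDate': '2023-02-21'}, {'score': 234567, 'scoreDate': '2023-02-22'}]
```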