
4 commits 1955754332 ... 1a57a5b299

Author SHA1 Message Date
  lierqiang 1a57a5b299 add kuaishou recommend crawler to the scheduler 2 years ago
  lierqiang 53cda1f5dd add Kuaishou author crawler to the scheduler 2 years ago
  lierqiang 5a2c4aa111 add xigua author crawler to the scheduler 2 years ago
  lierqiang af138109f9 add douyin recommend crawler to the scheduler 2 years ago

+ 58 - 0
douyin/douyin_main/run_dy_recommend_scheduling.py

@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/21
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from douyin.douyin_recommend.dy_recommend_scheduling import DyRecommendScheduling
+from common.public import task_fun
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task = task_fun(task)
+    try:
+        Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 定向榜\n')
+        DyRecommendScheduling.get_recommend_videos(log_type=log_type,
+                                                   crawler=crawler,
+                                                   task=task,
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取任务结束\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).info(f"{crawler}视频异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', default='recommend')  # add an argument, noting its type
+    parser.add_argument('--crawler', default='douyin')  # add an argument
+    parser.add_argument('--strategy', default='推荐')  # add an argument
+    parser.add_argument('--task')  # add an argument
+    parser.add_argument('--oss_endpoint', default='outer')  # add an argument
+    parser.add_argument('--env', default='dev')  # add an argument
+    # parser.add_argument('--machine')  # add an argument
+    args = parser.parse_args()  # assign argument values; they can also be passed on the command line
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'douyin', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'play_cnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                               'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                               'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_dy_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                      'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                      'videos_cnt': {'min': 0, 'max': 0},
+                      'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                      'height': {'min': 0, 'max': 0}}}
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

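Note: common/public.py's task_fun is imported here but not shown in this diff. A minimal sketch of what it plausibly does, given that main() receives the --task argument and the callees index into task['task_dict'] and task['rule_dict'] (the parsing details are an assumption, not the actual implementation):

import ast

def task_fun(task_str):
    # Hypothetical sketch -- the real common.public.task_fun is not in this diff.
    # Assumes the scheduler passes the task as a Python-literal string and that
    # callers expect a dict with 'task_dict' and 'rule_dict' keys.
    if isinstance(task_str, dict):
        return task_str  # already parsed, e.g. the sample task built in __main__
    return ast.literal_eval(task_str)
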
+ 81 - 0
douyin/douyin_recommend/dy_recommend_scheduling.py

The file diff is not shown because this file is too large.

+ 248 - 449
kuaishou/kuaishou_follow/kuaishou_follow_scheduling.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# @Author: wangkun
+# @Author: lierqiang
 # @Time: 2023/2/24
 import os
 import random
@@ -13,69 +13,70 @@ import json
 
 import urllib3
 from requests.adapters import HTTPAdapter
-from selenium import webdriver
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
 
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.getuser import getUser
-from common.db import MysqlHelper
+# from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
 from common.publish import Publish
+from common.public import random_title, get_config_from_mysql
+from common.public import get_user_from_mysql
+from common.userAgent import get_random_user_agent
 
 
-class Follow:
+class KuaiShouFollowScheduling:
     platform = "快手"
     tag = "快手爬虫,定向爬虫策略"
 
     @classmethod
     def get_rule(cls, log_type, crawler, index):
         try:
-            while True:
-                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
-                if rule_sheet is None:
-                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
-                    time.sleep(10)
-                    continue
-                if index == 1:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
-                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
-                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
-                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
-                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
-                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
-                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
-                elif index == 2:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
-                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
-                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
-                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
-                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
-                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
-                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
+            rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
+            if index == 1:
+                rule_dict = {
+                    "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                    "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                    "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                    "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+                    "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                    "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
+                    "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
+                }
+                # for k, v in rule_dict.items():
+                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                return rule_dict
+            elif index == 2:
+                rule_dict = {
+                    "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
+                    "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
+                    "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
+                    "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
+                    "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
+                    "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
+                    "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
+                }
+                # for k, v in rule_dict.items():
+                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                return rule_dict
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
 
     @classmethod
     def download_rule(cls, video_dict, rule_dict):
-        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
-                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
-                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
-                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
-                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
-                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
-            return True
+        if video_dict['like_cnt'] >= rule_dict['like_cnt']['min']:
+            if video_dict['publish_time'] >= rule_dict['publish_time']['min']:
+                if video_dict['duration'] >= rule_dict['duration']['min']:
+                    if video_dict['video_width'] >= rule_dict['width']['min'] \
+                            or video_dict['video_height'] >= rule_dict['height']['min']:
+                        return True
+                    else:
+                        return False
+                else:
+                    return False
+            else:
+                return False
         else:
             return False
 
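The rewritten download_rule drops the old eval()-based string comparisons in favor of explicit minimum thresholds read from rule_dict. The nested ifs above are equivalent to a single boolean expression; a behavior-preserving sketch:

def download_rule(video_dict, rule_dict):
    # Every metric must meet its configured minimum; for resolution,
    # passing on either width OR height is enough.
    return (video_dict['like_cnt'] >= rule_dict['like_cnt']['min']
            and video_dict['publish_time'] >= rule_dict['publish_time']['min']
            and video_dict['duration'] >= rule_dict['duration']['min']
            and (video_dict['video_width'] >= rule_dict['width']['min']
                 or video_dict['video_height'] >= rule_dict['height']['min']))

Note this reads rule_dict['publish_time']['min'], so the rule dict passed in must carry a 'publish_time' entry.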
@@ -99,56 +100,31 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
-    # 万能标题
-    @classmethod
-    def random_title(cls, log_type, crawler):
-        try:
-            while True:
-                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
-                if random_title_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{random_title_sheet} 10秒钟后重试")
-                    continue
-                random_title_list = []
-                for x in random_title_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            random_title_list.append(y)
-                return random.choice(random_title_list)
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
-
     # Fetch the off-platform user info
     @classmethod
     def get_out_user_info(cls, log_type, crawler, out_uid):
         try:
             url = "https://www.kuaishou.com/graphql"
+
             payload = json.dumps({
                 "operationName": "visionProfile",
                 "variables": {
-                    "userId": str(out_uid)
+                    "userId": out_uid
                 },
                 "query": "query visionProfile($userId: String) {\n  visionProfile(userId: $userId) {\n    result\n    hostName\n    userProfile {\n      ownerCount {\n        fan\n        photo\n        follow\n        photo_public\n        __typename\n      }\n      profile {\n        gender\n        user_name\n        user_id\n        headurl\n        user_text\n        user_profile_bg_url\n        __typename\n      }\n      isFollowing\n      __typename\n    }\n    __typename\n  }\n}\n"
             })
             headers = {
-                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
-                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
-                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                'content-type': 'application/json',
-                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                'Cache-Control': 'no-cache',
-                'Connection': 'keep-alive',
+                'Accept': '*/*',
+                'Content-Type': 'application/json',
                 'Origin': 'https://www.kuaishou.com',
-                'Pragma': 'no-cache',
-                'Sec-Fetch-Dest': 'empty',
-                'Sec-Fetch-Mode': 'cors',
-                'Sec-Fetch-Site': 'same-origin',
-                'accept': '*/*',
-                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"'
+                'Cookie': 'did=web_5d4d0dff78b7819f8b015e7a81e2ca98; clientid=3; kpf=PC_WEB; kpn=KUAISHOU_VISION',
+                'Content-Length': '552',
+                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
+                'Host': 'www.kuaishou.com',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
+                'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
+                'Accept-Encoding': 'gzip, deflate, br',
+                'Connection': 'keep-alive'
             }
             urllib3.disable_warnings()
             s = requests.session()
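For reference, the visionProfile call above boils down to a single POST. This standalone sketch reuses the endpoint, payload shape, and headers from the diff; the Cookie value is a placeholder and the GraphQL query is trimmed to a subset of the fields shown above:

import json
import requests

def fetch_out_user_profile(out_uid):
    # Standalone sketch of the request built above; the Cookie is a placeholder
    # for an anonymous web-session cookie, and the query is abbreviated.
    url = "https://www.kuaishou.com/graphql"
    payload = json.dumps({
        "operationName": "visionProfile",
        "variables": {"userId": out_uid},
        "query": "query visionProfile($userId: String) { visionProfile(userId: $userId) { result userProfile { ownerCount { fan photo } profile { user_name user_id } } } }",
    })
    headers = {
        "Content-Type": "application/json",
        "Origin": "https://www.kuaishou.com",
        "Referer": f"https://www.kuaishou.com/profile/{out_uid}",
        "Cookie": "did=web_xxxxxxxx; clientid=3; kpf=PC_WEB; kpn=KUAISHOU_VISION",  # placeholder
    }
    return requests.post(url, headers=headers, data=payload, timeout=10).json()
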
@@ -216,7 +192,7 @@ class Follow:
 
     # Fetch the user info list
     @classmethod
-    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
+    def get_user_list(cls, log_type, crawler, sheetid, env):
         try:
             while True:
                 user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
@@ -251,7 +227,7 @@ class Follow:
                                 "tag": cls.tag,
                             }
                             our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
-                                                              out_user_dict=out_user_dict, env=env, machine=machine)
+                                                                out_user_dict=out_user_dict, env=env)
                             our_uid = our_user_dict['our_uid']
                             our_user_link = our_user_dict['our_user_link']
                             Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
@@ -272,7 +248,7 @@ class Follow:
 
     # Clean up the video title
     @classmethod
-    def video_title(cls, log_type, crawler, title):
+    def video_title(cls, log_type, crawler, env, title):
         title_split1 = title.split(" #")
         if title_split1[0] != "":
             title1 = title_split1[0]
@@ -297,361 +273,186 @@ class Follow:
                           .replace("#", "").replace(".", "。").replace("\\", "") \
                           .replace(":", "").replace("*", "").replace("?", "") \
                           .replace("?", "").replace('"', "").replace("<", "") \
-                          .replace(">", "").replace("|", "").replace("@", "")[:40]
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
         if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
-            return cls.random_title(log_type, crawler)
+            return random_title(log_type, crawler, env, text='title')
         else:
             return video_title
 
     @classmethod
-    def get_cookie(cls, log_type, crawler, out_uid, machine):
+    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env, pcursor=""):
+        rule_dict_1 = task['rule_dict']
+        url = "https://www.kuaishou.com/graphql"
+        payload = json.dumps({
+            "operationName": "visionProfilePhotoList",
+            "variables": {
+                "userId": out_uid,
+                "pcursor": "",
+                "page": "profile"
+            },
+            "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+        })
+        headers = {
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://www.kuaishou.com',
+            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_44b06f828a7810da393092aa6bb8dde0; kpn=KUAISHOU_VISION',
+            'Content-Length': '1260',
+            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
+            'Host': 'www.kuaishou.com',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
+            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Connection': 'keep-alive'
+        }
         try:
-            # 打印请求配置
-            ca = DesiredCapabilities.CHROME
-            ca["goog:loggingPrefs"] = {"performance": "ALL"}
-
-            # 不打开浏览器运行
-            chrome_options = webdriver.ChromeOptions()
-            chrome_options.add_argument("headless")
-            chrome_options.add_argument(
-                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-            chrome_options.add_argument("--no-sandbox")
-
-            # driver初始化
-            if machine == "aliyun":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
-            elif machine == "macpro":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v107/chromedriver'))
-            elif machine == "macair":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/piaoquan/Downloads/chromedriver_v108/chromedriver'))
-            else:
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v109/chromedriver'))
-
-            driver.implicitly_wait(10)
-            # print('打开个人主页')
-            driver.get(f'https://www.kuaishou.com/profile/{out_uid}')
-            time.sleep(1)
-
-            # print('解析cookies')
-            logs = driver.get_log("performance")
-            # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
-            # print('退出浏览器')
-            driver.quit()
-            for line in logs:
-                msg = json.loads(line['message'])
-                # Common.logger(log_type, crawler).info(f"{msg}\n\n")
-                if 'message' not in msg:
-                    pass
-                elif 'params' not in msg['message']:
-                    pass
-                elif 'headers' not in msg['message']['params']:
-                    pass
-                elif 'Cookie' not in msg['message']['params']['headers']:
-                    pass
-                elif msg['message']['params']['headers']['Host'] != 'www.kuaishou.com':
-                    pass
-                else:
-                    cookie = msg['message']['params']['headers']['Cookie']
-                    # Common.logger(log_type, crawler).info(f"{cookie}")
-                    return cookie
+            response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
+                                     verify=False, timeout=10)
+            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
         except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_cookie:{e}\n")
-
-    @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        try:
-            download_cnt_1, download_cnt_2 = 0, 0
-            pcursor = ""
+            Common.logger(log_type, crawler).error(f"get_videoList:{e}")  # avoid response.text here: response is unbound if the POST itself raised
+            return
+        if not feeds:
+            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+            return
+        pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
+        for i in range(len(feeds)):
+            # video_title
+            if 'caption' not in feeds[i]['photo']:
+                video_title = random_title(log_type, crawler, env, text='title')
+            elif feeds[i]['photo']['caption'].strip() == "":
+                video_title = random_title(log_type, crawler, env, text='title')
+            else:
+                video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
+
+            if 'videoResource' not in feeds[i]['photo'] \
+                    and 'manifest' not in feeds[i]['photo'] \
+                    and 'manifestH265' not in feeds[i]['photo']:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                break
+            videoResource = feeds[i]['photo']['videoResource']
+
+            if 'h264' not in videoResource and 'hevc' not in videoResource:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                break
+
+            # video_id
+            if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                video_id = videoResource['h264']['videoId']
+            elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                video_id = videoResource['hevc']['videoId']
+            else:
+                video_id = ""
 
-            while True:
-                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
-                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
-                if rule_dict_1 is None or rule_dict_2 is None:
-                    Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
-                    time.sleep(10)
-                else:
-                    break
+            # play_cnt
+            if 'viewCount' not in feeds[i]['photo']:
+                play_cnt = 0
+            else:
+                play_cnt = int(feeds[i]['photo']['viewCount'])
 
-            while True:
-                if download_cnt_1 >= int(
-                        rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[
-                            -1]) and download_cnt_2 >= int(
-                        rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                    Common.logger(log_type, crawler).info(
-                        f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
-                    return
+            # like_cnt
+            if 'realLikeCount' not in feeds[i]['photo']:
+                like_cnt = 0
+            else:
+                like_cnt = feeds[i]['photo']['realLikeCount']
 
-                url = "https://www.kuaishou.com/graphql"
-                payload = json.dumps({
-                    "operationName": "visionProfilePhotoList",
-                    "variables": {
-                        "userId": out_uid,
-                        "pcursor": pcursor,
-                        "page": "profile"
-                    },
-                    "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
-                })
-                # get_cookie = cls.get_cookie(log_type, crawler, out_uid, machine)
-                # if get_cookie is None:
-                #     cookie = 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION'
-                # else:
-                #     cookie = get_cookie
-                # Common.logger(log_type, crawler).info(f"cookie:{cookie}")
-                headers = {
-                    # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
-                    # 'Cookie': cookie,
-                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
-                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                    'content-type': 'application/json',
-                    # 'accept': '*/*',
-                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                    # 'Cache-Control': 'no-cache',
-                    # 'Connection': 'keep-alive',
-                    # 'Origin': 'https://www.kuaishou.com',
-                    # 'Pragma': 'no-cache',
-                    # 'Sec-Fetch-Dest': 'empty',
-                    # 'Sec-Fetch-Mode': 'cors',
-                    # 'Sec-Fetch-Site': 'same-origin',
-                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                    # 'sec-ch-ua-mobile': '?0',
-                    # 'sec-ch-ua-platform': '"macOS"'
-                }
-                urllib3.disable_warnings()
-                s = requests.session()
-                # max_retries=3 重试3次
-                s.mount('http://', HTTPAdapter(max_retries=3))
-                s.mount('https://', HTTPAdapter(max_retries=3))
-                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
-                                  timeout=5)
-                response.close()
-                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
-                if response.status_code != 200:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
-                    return
-                elif 'data' not in response.json():
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
-                    return
-                elif 'visionProfilePhotoList' not in response.json()['data']:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
-                    return
-                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-                    Common.logger(log_type, crawler).warning(
-                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
-                    return
-                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
-                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-                    return
-                else:
-                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
-                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
-                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
-                    for i in range(len(feeds)):
-                        if 'photo' not in feeds[i]:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
-                            break
-
-                        # video_title
-                        if 'caption' not in feeds[i]['photo']:
-                            video_title = cls.random_title(log_type, crawler)
-                        elif feeds[i]['photo']['caption'].strip() == "":
-                            video_title = cls.random_title(log_type, crawler)
-                        else:
-                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
-
-                        if 'videoResource' not in feeds[i]['photo'] \
-                                and 'manifest' not in feeds[i]['photo'] \
-                                and 'manifestH265' not in feeds[i]['photo']:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                            break
-                        videoResource = feeds[i]['photo']['videoResource']
-
-                        if 'h264' not in videoResource and 'hevc' not in videoResource:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                            break
-
-                        # video_id
-                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                            video_id = videoResource['h264']['videoId']
-                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                            video_id = videoResource['hevc']['videoId']
-                        else:
-                            video_id = ""
+            # publish_time
+            if 'timestamp' not in feeds[i]['photo']:
+                publish_time_stamp = 0
+                publish_time_str = ''
+                publish_time = 0
+            else:
+                publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
-                        # play_cnt
-                        if 'viewCount' not in feeds[i]['photo']:
-                            play_cnt = 0
-                        else:
-                            play_cnt = int(feeds[i]['photo']['viewCount'])
+            # duration
+            if 'duration' not in feeds[i]['photo']:
+                duration = 0
+            else:
+                duration = int(int(feeds[i]['photo']['duration']) / 1000)
 
-                        # like_cnt
-                        if 'realLikeCount' not in feeds[i]['photo']:
-                            like_cnt = 0
-                        else:
-                            like_cnt = feeds[i]['photo']['realLikeCount']
+            # video_width / video_height / video_url
+            mapping = {}
+            for item in ['width', 'height']:
+                try:
+                    val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                except (KeyError, IndexError, TypeError):  # fall back to the hevc resource
+                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                mapping[item] = val
+            video_width = int(mapping['width']) if mapping['width'] else 0
+            video_height = int(mapping['height']) if mapping['height'] else 0
+            # cover_url
+            if 'coverUrl' not in feeds[i]['photo']:
+                cover_url = ""
+            else:
+                cover_url = feeds[i]['photo']['coverUrl']
+
+            # user_name / avatar_url
+            user_name = feeds[i]['author']['name']
+            avatar_url = feeds[i]['author']['headerUrl']
+
+            video_url = feeds[i]['photo']['photoUrl']
+            video_dict = {'video_title': video_title,
+                          'video_id': video_id,
+                          'play_cnt': play_cnt,
+                          'comment_cnt': 0,
+                          'like_cnt': like_cnt,
+                          'share_cnt': 0,
+                          'video_width': video_width,
+                          'video_height': video_height,
+                          'duration': duration,
+                          'publish_time': publish_time,
+                          'publish_time_stamp': publish_time_stamp,
+                          'publish_time_str': publish_time_str,
+                          'user_name': user_name,
+                          'user_id': out_uid,
+                          'avatar_url': avatar_url,
+                          'cover_url': cover_url,
+                          'video_url': video_url,
+                          'session': f"kuaishou{int(time.time())}"}
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            rule_1 = cls.download_rule(video_dict, rule_dict_1)
+            if rule_1 is True:
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     strategy=strategy,
+                                     video_dict=video_dict,
+                                     rule_dict=rule_dict_1,
+                                     our_uid=our_uid,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env,
+                                     )
 
-                        # publish_time
-                        if 'timestamp' not in feeds[i]['photo']:
-                            publish_time_stamp = 0
-                            publish_time_str = ''
-                            publish_time = 0
-                        else:
-                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
-                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
-                        # duration
-                        if 'duration' not in feeds[i]['photo']:
-                            duration = 0
-                        else:
-                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
-
-                        # video_width / video_height / video_url
-                        mapping = {}
-                        for item in ['width', 'height', 'url']:
-                            try:
-                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                            except Exception:
-                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                            except:
-                                val = ''
-                            mapping[item] = val
-                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
-                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
-                        video_url = mapping['url']
-
-                        # cover_url
-                        if 'coverUrl' not in feeds[i]['photo']:
-                            cover_url = ""
-                        else:
-                            cover_url = feeds[i]['photo']['coverUrl']
-
-                        # user_name / avatar_url
-                        try:
-                            user_name = feeds[i]['author']['name']
-                            avatar_url = feeds[i]['author']['headerUrl']
-                        except Exception:
-                            user_name = ''
-                            avatar_url = ''
-
-                        video_dict = {'video_title': video_title,
-                                      'video_id': video_id,
-                                      'play_cnt': play_cnt,
-                                      'comment_cnt': 0,
-                                      'like_cnt': like_cnt,
-                                      'share_cnt': 0,
-                                      'video_width': video_width,
-                                      'video_height': video_height,
-                                      'duration': duration,
-                                      'publish_time': publish_time,
-                                      'publish_time_stamp': publish_time_stamp,
-                                      'publish_time_str': publish_time_str,
-                                      'user_name': user_name,
-                                      'user_id': out_uid,
-                                      'avatar_url': avatar_url,
-                                      'cover_url': cover_url,
-                                      'video_url': video_url,
-                                      'session': f"kuaishou{int(time.time())}"}
-
-                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
-                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
-
-                        if video_title == "" or video_url == "":
-                            Common.logger(log_type, crawler).info("无效视频\n")
-                            break
-                        elif rule_1 is True:
-                            if download_cnt_1 < int(
-                                    rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_1,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_1 += 1
-                        elif rule_2 is True:
-                            if download_cnt_2 < int(
-                                    rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_2,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_2 += 1
-                        else:
-                            Common.logger(log_type, crawler).info("不满足下载规则\n")
-                            # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+            else:
+                Common.logger(log_type, crawler).info("不满足下载规则\n")
 
-                    if pcursor == "no_more":
-                        Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
-                        return
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+            # if pcursor == "no_more":
+            #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
+            #     return
+            # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env,
+            #               pcursor=pcursor)
+            # time.sleep(random.randint(1, 3))
 
     @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
+    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
         sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
         try:
-            download_finished = False
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+                    return
             if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
-                                video_dict['publish_time_str'], env, machine) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
+                                video_dict['publish_time_str'], env) != 0:
                 Common.logger(log_type, crawler).info('视频已下载\n')
-            elif any(word if word in video_dict['video_title'] else False for word in
-                     cls.filter_words(log_type, crawler)) is True:
-                Common.logger(log_type, crawler).info('标题已中过滤词\n')
             else:
                 # Download the video
                 Common.download_method(log_type=log_type, crawler=crawler, text='video',
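A side note on repeat_video above: the deduplication SQL is assembled with an f-string, so a title containing a double quote would break the statement. If the underlying driver supports parameter binding (MysqlHelper's internals are not shown in this diff), a safer equivalent would look like this sketch:

import pymysql  # assumption: MysqlHelper wraps a pymysql-style connection

def repeat_video_count(conn, platform, video_id, video_title, publish_time):
    # Hypothetical parameterized rewrite of the f-string SQL in repeat_video;
    # grouping matches the original (AND binds tighter than OR).
    sql = ("select * from crawler_video "
           "where (platform=%s and out_video_id=%s) "
           "or (platform=%s and video_title=%s and publish_time=%s)")
    with conn.cursor() as cursor:
        cursor.execute(sql, (platform, video_id, platform, video_title, publish_time))
        return len(cursor.fetchall())
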
@@ -693,7 +494,7 @@ class Follow:
                     Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                     # Delete the video folder
                     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                    return download_finished
+                    return
 
                 # Save the video info to the database
                 insert_sql = f""" insert into crawler_video(video_id,
@@ -727,7 +528,7 @@ class Follow:
                                                         {int(video_dict['video_width'])},
                                                         {int(video_dict['video_height'])}) """
                 Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                 Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
 
                 # Write the video to the Feishu sheet
@@ -755,43 +556,41 @@ class Follow:
                 Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                 Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                 download_finished = True
-            return download_finished
+            return
         except Exception as e:
             Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
 
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
+    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        strategy = '定向抓取策略'
         for user in user_list:
-            out_uid = user["out_uid"]
-            user_name = user["user_name"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              strategy=strategy,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env,
-                              machine=machine)
-            sleep_time = 120
-            Common.logger(log_type, crawler).info(f"休眠{sleep_time}秒\n")
-            time.sleep(sleep_time)
+            try:
+                spider_link = user["link"]
+                out_uid = spider_link.split('/')[-1]
+                user_name = user["nick_name"]
+                our_uid = user["uid"]
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  task=task,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")  # log the failure; the loop moves on to the next user
 
 
 if __name__ == "__main__":
-    # Follow.get_videoList(log_type="follow",
-    #                      crawler="kuaishou",
-    #                      strategy="定向爬虫策略",
-    #                      our_uid="6282431",
-    #                      out_uid="3xws7ydsnmp5mgq",
-    #                      oss_endpoint="out",
-    #                      env="dev",
-    #                      machine="local")
-    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
+    KuaiShouFollowScheduling.get_follow_videos(
+        log_type="follow",
+        crawler="kuaishou",
+        task="",
+        oss_endpoint="out",
+        env="dev",
+    )
+
+    # print(KuaiShouFollow.get_out_user_info("follow", "kuaishou", "3xnk3wbm3vfiha6"))
     # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3xvp5w6twj77xeq", "local"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3xgh4ja9be3wcaw", "local"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3x5wgjhfc7tx8ue", "local"))
-    pass

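The reworked get_follow_videos now pulls its user list from MySQL via get_user_from_mysql (also not shown in this diff). Inferred from how the loop consumes each row, a record presumably looks like the following; the field values are illustrative, borrowed from the uids in the removed __main__ comments:

# Hypothetical row shape for get_user_from_mysql, inferred from the loop above.
user = {
    "link": "https://www.kuaishou.com/profile/3xws7ydsnmp5mgq",  # spider_link
    "nick_name": "example author",                               # display name
    "uid": "6282431",                                            # internal (our) uid
}
out_uid = user["link"].split("/")[-1]  # -> "3xws7ydsnmp5mgq"
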
+ 40 - 32
kuaishou/kuaishou_main/run_kuaishou_follow_scheduling.py

@@ -1,49 +1,57 @@
 # -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/27
+# @Author: lierqiang
+# @Time: 2023/4/21
 import argparse
 import os
 import sys
-# import time
 
 sys.path.append(os.getcwd())
 from common.common import Common
-# from common.feishu import Feishu
-from kuaishou.kuaishou_follow.kuaishou_follow_scheduling import Follow
+from kuaishou.kuaishou_follow.kuaishou_follow_scheduling import KuaiShouFollowScheduling
+from common.public import task_fun
 
 
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    while True:
-        try:
-            Common.logger(log_type, crawler).info('开始抓取 快手 定向榜\n')
-            Follow.get_follow_videos(log_type=log_type,
-                                     crawler=crawler,
-                                     strategy=strategy,
-                                     oss_endpoint=oss_endpoint,
-                                     env=env,
-                                     machine=machine)
-            Common.del_logs(log_type, crawler)
-            Common.logger(log_type, crawler).info('抓取完一轮\n')
-            break
-        except Exception as e:
-            Common.logger(log_type, crawler).info(f"快手定向榜异常,触发报警:{e}\n")
-            # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
+def main(log_type, crawler, task, oss_endpoint, env):
+    task = task_fun(task)
+    try:
+        Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 定向榜\n')
+        KuaiShouFollowScheduling.get_follow_videos(log_type=log_type,
+                                                   crawler=crawler,
+                                                   task=task,
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取任务结束\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).info(f"{crawler}视频异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"{e}")
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()  # create the argument parser
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--strategy')  ## 添加参数
-    parser.add_argument('--our_uid')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    parser.add_argument('--machine')  ## 添加参数
+    parser.add_argument('--log_type', default='author')  # log category
+    parser.add_argument('--crawler', default='kuaishou')  # crawler name
+    parser.add_argument('--strategy', default='定向抓取')  # crawl strategy (not consumed by main)
+    parser.add_argument('--task')  # scheduler task payload (superseded by the inline fixture below)
+    parser.add_argument('--oss_endpoint', default='outer')  # OSS upload endpoint
+    parser.add_argument('--env', default='dev')  # runtime environment
+    # parser.add_argument('--machine')
     args = parser.parse_args()  # read arguments; values can also be supplied on the command line
-    # print(args)
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'kuaishou', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'play_cnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                               'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                               'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_dy_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                      'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0}, 'videos_cnt': {'min': 0, 'max': 0},
+                      'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                      'height': {'min': 0, 'max': 0},'publish_time':{'min':0}}}
     main(log_type=args.log_type,
          crawler=args.crawler,
-         strategy=args.strategy,
+         task=task,
          oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)
+         env=args.env)

+ 57 - 0
kuaishou/kuaishou_main/run_kuaishou_recommend_scheduling.py

@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/21
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from kuaishou.kuaishou_recommend.kuaishou_recommend_shceduling import KuaiShouRecommendScheduling
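+# NB: 'shceduling' matches the (misspelled) filename of the module added later in this change, so the import resolves as written.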
+from common.public import task_fun
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task = task_fun(task)
+    try:
+        Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 推荐榜\n')
+        KuaiShouRecommendScheduling.get_recommend_videos(
+            log_type=log_type,
+            crawler=crawler,
+            task=task,
+            oss_endpoint=oss_endpoint,
+            env=env,
+        )
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取任务结束\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).info(f"{crawler}视频异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # build the CLI argument parser
+    parser.add_argument('--log_type', default='recommend')  # log category
+    parser.add_argument('--crawler', default='kuaishou')  # crawler name
+    parser.add_argument('--strategy', default='推荐')  # crawl strategy (not consumed by main)
+    parser.add_argument('--task')  # scheduler task payload (superseded by the inline fixture below)
+    parser.add_argument('--oss_endpoint', default='outer')  # OSS upload endpoint
+    parser.add_argument('--env', default='dev')  # runtime environment
+    # parser.add_argument('--machine')
+    args = parser.parse_args()  # read arguments; values can also be supplied on the command line
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'kuaishou', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'play_cnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                               'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                               'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_dy_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                      'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0}, 'videos_cnt': {'min': 0, 'max': 0},
+                      'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                      'height': {'min': 0, 'max': 0},'publish_time':{'min':0}}}
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 503 - 0
kuaishou/kuaishou_recommend/kuaishou_recommend_shceduling.py

@@ -0,0 +1,503 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import shutil
+import sys
+import time
+import string
+from hashlib import md5
+
+import requests
+import json
+
+import urllib3
+from requests.adapters import HTTPAdapter
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.getuser import getUser
+# from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
+from common.publish import Publish
+from common.public import get_user_from_mysql, random_title, get_config_from_mysql
+from common.userAgent import get_random_user_agent
+
+
+class KuaiShouRecommendScheduling:
+    platform = "快手"
+    tag = "快手爬虫,推荐爬虫策略"
+
+    @classmethod
+    def get_rule(cls, log_type, crawler):
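+        # Legacy Feishu-sheet thresholds, kept for reference; the scheduling flow below takes
+        # its rules from task['rule_dict'] instead (see get_videoList).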
+        try:
+            rule_sheet = Feishu.get_values_batch(log_type, crawler, "NQ6CZN")
+            rule_dict = {
+                "play_cnt": f"{rule_sheet[0][1]}{rule_sheet[0][2]}",
+                "video_width": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                "video_height": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                "like_cnt": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                "duration": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                "publish_time": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+            }
+            return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    @classmethod
+    def download_rule(cls, video_dict, rule_dict):
+        # Every threshold below is a minimum; width/height only needs one dimension to pass.
+        if video_dict['like_cnt'] < rule_dict['like_cnt']['min']:
+            return False
+        if video_dict['publish_time'] < rule_dict['publish_time']['min']:
+            return False
+        if video_dict['duration'] < rule_dict['duration']['min']:
+            return False
+        return video_dict['video_width'] >= rule_dict['width']['min'] \
+            or video_dict['video_height'] >= rule_dict['height']['min']
+
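
For illustration, a minimal sketch of how this rule check behaves; the field names follow the video_dict / rule_dict shapes used in this file, and the numbers are made up:

    rule_dict = {'like_cnt': {'min': 100, 'max': 0}, 'publish_time': {'min': 0},
                 'duration': {'min': 40, 'max': 0},
                 'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}}
    video_dict = {'like_cnt': 250, 'publish_time': 3, 'duration': 65,
                  'video_width': 720, 'video_height': 1280}
    assert KuaiShouRecommendScheduling.download_rule(video_dict, rule_dict) is True
    video_dict['duration'] = 20  # below the 40-second floor
    assert KuaiShouRecommendScheduling.download_rule(video_dict, rule_dict) is False
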
+    # Filter-word list (legacy Feishu source; download_publish below reads its filters from MySQL instead)
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
+    # Fetch the user list from the Feishu sheet, creating matching in-house accounts where missing
+    @classmethod
+    def get_user_list(cls, log_type, crawler, sheetid, env):
+        try:
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    continue
+                our_user_list = []
+                for i in range(1, len(user_sheet)):
+                    # for i in range(1, 2):
+                    out_uid = user_sheet[i][2]
+                    user_name = user_sheet[i][3]
+                    our_uid = user_sheet[i][6]
+                    our_user_link = user_sheet[i][7]
+                    if out_uid is None or user_name is None:
+                        Common.logger(log_type, crawler).info("空行\n")
+                    else:
+                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                        if our_uid is None:
+                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                            out_user_dict = {
+                                "out_uid": out_uid,
+                                "user_name": user_name,
+                                "out_avatar_url": out_user_info["out_avatar_url"],
+                                "out_create_time": '',
+                                "out_tag": '',
+                                "out_play_cnt": 0,
+                                "out_fans": out_user_info["out_fans"],
+                                "out_follow": out_user_info["out_follow"],
+                                "out_friend": 0,
+                                "out_like": 0,
+                                "platform": cls.platform,
+                                "tag": cls.tag,
+                            }
+                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
+                                                                out_user_dict=out_user_dict, env=env)
+                            our_uid = our_user_dict['our_uid']
+                            our_user_link = our_user_dict['our_user_link']
+                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                                 [[our_uid, our_user_link]])
+                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                            our_user_list.append(our_user_dict)
+                        else:
+                            our_user_dict = {
+                                'out_uid': out_uid,
+                                'user_name': user_name,
+                                'our_uid': our_uid,
+                                'our_user_link': our_user_link,
+                            }
+                            our_user_list.append(our_user_dict)
+                return our_user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
+
+    # Normalize the video title
+    @classmethod
+    def video_title(cls, log_type, crawler, env, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return random_title(log_type, crawler, env, text='title')
+        else:
+            return video_title
+
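
A quick trace of the cleanup above with a hypothetical caption "周末去爬山 #户外 @小王": the " #..." tag segment is split off first, then the "@..." mention, then forbidden filesystem/markup characters are stripped and the result is truncated to 40 characters, yielding "周末去爬山". Captions that clean down to nothing fall back to random_title().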
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, oss_endpoint, env):
+        rule_dict_1 = task['rule_dict']
+        for i in range(100):
+            url = "https://www.kuaishou.com/graphql"
+
+            payload = json.dumps({
+                "operationName": "visionNewRecoFeed",
+                "variables": {
+                    "dailyFirstPage": False
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nfragment photoResult on PhotoResult {\n  result\n  llsid\n  expTag\n  serverExpTag\n  pcursor\n  feeds {\n    ...feedContent\n    __typename\n  }\n  webPageArea\n  __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n  visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n    ...photoResult\n    __typename\n  }\n}\n"
+            })
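+            # choose a random lowercase letter to vary the device id (did) cookie below,
+            # so consecutive requests do not present an identical web did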
+            s = string.ascii_lowercase
+            r = random.choice(s)
+
+            headers = {
+                'Accept-Language': 'zh-CN,zh;q=0.9',
+                'Connection': 'keep-alive',
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_aba004b1780f4d7174d0a2ff42da1f{r}7; kpn=KUAISHOU_VISION;'.format(
+                    r=r),
+                'Origin': 'https://www.kuaishou.com',
+                'Referer': 'https://www.kuaishou.com/new-reco',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin',
+                'User-Agent': get_random_user_agent('pc'),
+                'accept': '*/*',
+                'content-type': 'application/json',
+                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"'
+            }
+
+            try:
+                urllib3.disable_warnings()
+                s = requests.session()
+                # max_retries=3: retry the connection up to 3 times
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                                  timeout=10)
+                response.close()
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+                continue
+            # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                continue
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                continue
+            elif 'visionNewRecoFeed' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                continue
+            elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
+                Common.logger(log_type, crawler).warning(
+                    f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                continue
+            elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                continue
+            else:
+                feeds = response.json()['data']['visionNewRecoFeed']['feeds']
+                for i in range(len(feeds)):
+                    if 'photo' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                        continue
+
+                    # video_title
+                    if 'caption' not in feeds[i]['photo']:
+                        video_title = random_title(log_type, crawler, env, text='title')
+
+                    elif feeds[i]['photo']['caption'].strip() == "":
+                        video_title = random_title(log_type, crawler, env, text='title')
+                    else:
+                        video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
+
+                    if 'videoResource' not in feeds[i]['photo'] \
+                            and 'manifest' not in feeds[i]['photo'] \
+                            and 'manifestH265' not in feeds[i]['photo']:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                        continue
+                    videoResource = feeds[i]['photo'].get('videoResource', {})  # may be absent even when manifest keys exist
+
+                    if 'h264' not in videoResource and 'hevc' not in videoResource:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                        continue
+
+                    # video_id
+                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                        video_id = videoResource['h264']['videoId']
+                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                        video_id = videoResource['hevc']['videoId']
+                    else:
+                        video_id = ""
+
+                    # play_cnt
+                    if 'viewCount' not in feeds[i]['photo']:
+                        play_cnt = 0
+                    else:
+                        play_cnt = int(feeds[i]['photo']['viewCount'])
+
+                    # like_cnt
+                    if 'realLikeCount' not in feeds[i]['photo']:
+                        like_cnt = 0
+                    else:
+                        like_cnt = feeds[i]['photo']['realLikeCount']
+
+                    # publish_time
+                    if 'timestamp' not in feeds[i]['photo']:
+                        publish_time_stamp = 0
+                        publish_time_str = ''
+                        publish_time = 0
+                    else:
+                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))  # video age in days
+
+                    # duration
+                    if 'duration' not in feeds[i]['photo']:
+                        duration = 0
+                    else:
+                        duration = int(int(feeds[i]['photo']['duration']) / 1000)
+
+                    # video_width / video_height / video_url
+                    # Prefer the h264 rendition, fall back to hevc. Note a second `except`
+                    # clause cannot catch failures raised inside the first handler, so the
+                    # fallback must be a nested try block.
+                    mapping = {}
+                    for item in ['width', 'height']:
+                        try:
+                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                        except Exception:
+                            try:
+                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                val = ''
+                        mapping[item] = val
+                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                    # cover_url
+                    if 'coverUrl' not in feeds[i]['photo']:
+                        cover_url = ""
+                    else:
+                        cover_url = feeds[i]['photo']['coverUrl']
+
+                    # user_name / avatar_url
+                    try:
+                        user_name = feeds[i]['author']['name']
+                        avatar_url = feeds[i]['author']['headerUrl']
+                        user_id = feeds[i]['author']['id']
+                    except Exception:
+                        user_name = ''
+                        avatar_url = ''
+                        user_id = ''
+                    video_url = feeds[i]['photo']['photoUrl']
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': 0,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': duration,
+                                  'publish_time': publish_time,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': user_id,
+                                  'avatar_url': avatar_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"kuaishou{int(time.time())}"}
+
+                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
+
+                    if rule_1 is True:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy=strategy,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict_1,
+                                             our_uid=our_uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env,
+                                             )
+
+                    else:
+                        Common.logger(log_type, crawler).info("不满足下载规则\n")
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
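
A reading note on the dedup query above: SQL and binds tighter than or, so the statement already groups as (platform and out_video_id) or (platform and video_title and publish_time). If that grouping is the intent, explicit parentheses would make it obvious:

    sql = f""" select * from crawler_video
               where (platform="{cls.platform}" and out_video_id="{video_id}")
                  or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
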
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
+        try:
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
+                    return
+            download_finished = False
+            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
+                                video_dict['publish_time_str'], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            else:
+                # Download the video file
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                    # remove the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # Download the cover image
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
+                # Save video metadata to a txt file
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # Upload the video
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
+                    # remove the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return download_finished
+
+                # Persist video info to the database. Values are interpolated directly into the
+                # SQL; video_title() has already stripped quote characters, which keeps the
+                # statement well-formed.
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                        user_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        {our_uid},
+                                                        "{video_dict['user_id']}",
+                                                        "{cls.platform}",
+                                                        "{strategy}",
+                                                        "{video_dict['video_id']}",
+                                                        "{video_dict['video_title']}",
+                                                        "{video_dict['cover_url']}",
+                                                        "{video_dict['video_url']}",
+                                                        {int(video_dict['duration'])},
+                                                        "{video_dict['publish_time_str']}",
+                                                        {int(video_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_dict['video_width'])},
+                                                        {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+                # Append the video row to the Feishu sheet
+                Feishu.insert_columns(log_type, 'kuaishou', "Aps2BI", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[our_video_id,
+                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           strategy,
+                           str(video_dict['video_id']),
+                           video_dict['video_title'],
+                           our_video_link,
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['video_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, 'kuaishou', "Aps2BI", "E2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+                download_finished = True
+            return download_finished
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+
+    @classmethod
+    def get_recommend_videos(cls, log_type, crawler, task, oss_endpoint, env):
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        strategy = '推荐抓取策略'
+        for user in user_list:
+            spider_link = user["link"]
+            out_uid = spider_link
+            user_name = user["nick_name"]
+            our_uid = user["uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              task=task,
+                              our_uid=our_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+
+
+if __name__ == "__main__":
+    # get_videoList reads task['rule_dict'], so an empty string would raise here; an all-zero
+    # rule set (mirroring the scheduler fixture in run_kuaishou_recommend_scheduling.py)
+    # keeps this smoke test runnable.
+    task = {'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                          'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                          'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                          'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0},
+                          'publish_time': {'min': 0}}}
+    KuaiShouRecommendScheduling.get_recommend_videos('recommend', 'kuaishou', task, 'outer', 'prod')

+ 148 - 173
xigua/xigua_follow/xigua_follow_scheduling.py

@@ -15,18 +15,40 @@ from hashlib import md5
 import requests
 import urllib3
 from requests.adapters import HTTPAdapter
+
 sys.path.append(os.getcwd())
-from common.scheduling_db import  MysqlHelper
+from common.scheduling_db import MysqlHelper
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
+from common.public import get_user_from_mysql, get_config_from_mysql, download_rule
 
 
-class SchedulingFollow:
+class ScheduleXiguaFollow:
     # pagination offset for the author-homepage video feed
     offset = 0
     platform = "西瓜视频"
 
+    @classmethod
+    def download_rule(cls, video_info_dict, rule_dict):
+        # Local mirror of common.public.download_rule (download_publish below calls the shared one);
+        # every threshold is a minimum, and width/height only needs one dimension to pass.
+        if video_info_dict['play_cnt'] < rule_dict['play_cnt']['min']:
+            return False
+        if video_info_dict['comment_cnt'] < rule_dict['comment_cnt']['min']:
+            return False
+        if video_info_dict['like_cnt'] < rule_dict['like_cnt']['min']:
+            return False
+        if video_info_dict['duration'] < rule_dict['duration']['min']:
+            return False
+        return video_info_dict['video_width'] >= rule_dict['width']['min'] \
+            or video_info_dict['video_height'] >= rule_dict['height']['min']
+
     @classmethod
     def get_users(cls, log_type, crawler, task, env):
         link_list = task['spider_link']
@@ -50,64 +72,6 @@ class SchedulingFollow:
         Common.logger(log_type, crawler).info(f"user_list:{user_list}")
         return user_list
 
-    # 下载规则
-    @classmethod
-    def download_rule_scheduling(cls, video_info_dict, task):
-        try:
-            play_cnt_min = int(task['play_cnt']['min'])
-        except:
-            play_cnt_min = 0
-
-        try:
-            video_like_min = int(task['video_like']['min'])
-        except:
-            video_like_min = 0
-
-        try:
-            share_cnt_min = int(task['share_cnt']['min'])
-        except:
-            share_cnt_min = 0
-
-        try:
-            video_width_min = int(task['video_width']['min'])
-        except:
-            video_width_min = 0
-
-        try:
-            video_height_min = task['video_height']['min']
-        except:
-            video_height_min = 0
-
-        try:
-            duration_min = int(task['duration_min'])
-        except:
-            duration_min = 0
-
-        try:
-            duration_max = int(task['duration_max'])
-        except:
-            duration_max = 1000000000
-
-        if int(video_info_dict['play_cnt']) >= play_cnt_min:
-            if int(video_info_dict['like_cnt']) >= video_like_min:
-                if int(video_info_dict['share_cnt']) >= share_cnt_min:
-                    if duration_max >= int(video_info_dict['duration']) >= duration_min:
-                        if int(video_info_dict['video_width']) >= video_width_min:
-                            if int(video_info_dict['video_height']) >= video_height_min:
-                                return True
-                            else:
-                                return False
-                        else:
-                            return False
-                    else:
-                        return False
-                else:
-                    return False
-            else:
-                return False
-        else:
-            return False
-
     # Filter-word list
     @classmethod
     def filter_words(cls, log_type, crawler):
@@ -197,7 +161,8 @@ class SchedulingFollow:
             # max_retries=3: retry the connection up to 3 times
             s.mount('http://', HTTPAdapter(max_retries=3))
             s.mount('https://', HTTPAdapter(max_retries=3))
-            response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+            response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False,
+                             proxies=Common.tunnel_proxies(), timeout=5)
             response.close()
             if 'data' not in response.json() or response.json()['data'] == '':
                 Common.logger(log_type, crawler).warning('get_video_info: response: {}', response)
@@ -212,7 +177,8 @@ class SchedulingFollow:
                     video_url_dict["video_height"] = 0
 
                 elif 'dash_120fps' in video_info['videoResource']:
-                    if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in video_info['videoResource']['dash_120fps']['video_list']:
+                    if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in \
+                            video_info['videoResource']['dash_120fps']['video_list']:
                         video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
                         audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -231,7 +197,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in video_info['videoResource']['dash_120fps']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in \
+                            video_info['videoResource']['dash_120fps']['video_list']:
                         video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
                         audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -250,7 +217,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in video_info['videoResource']['dash_120fps']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in \
+                            video_info['videoResource']['dash_120fps']['video_list']:
                         video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
                         audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -269,7 +237,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in video_info['videoResource']['dash_120fps']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in \
+                            video_info['videoResource']['dash_120fps']['video_list']:
                         video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
                         audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -292,11 +261,17 @@ class SchedulingFollow:
                     elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
                             and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
                             and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
-                            and len(video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
-                            and len(video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
-
-                        video_url = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['backup_url_1']
-                        audio_url = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1]['backup_url_1']
+                            and len(
+                        video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                            and len(
+                        video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                        video_url = \
+                            video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                                'backup_url_1']
+                        audio_url = \
+                            video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1][
+                                'backup_url_1']
                         if len(video_url) % 3 == 1:
                             video_url += '=='
                         elif len(video_url) % 3 == 2:
@@ -307,8 +282,12 @@ class SchedulingFollow:
                             audio_url += '='
                         video_url = base64.b64decode(video_url).decode('utf8')
                         audio_url = base64.b64decode(audio_url).decode('utf8')
-                        video_width = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['vwidth']
-                        video_height = video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1]['vheight']
+                        video_width = \
+                            video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                                'vwidth']
+                        video_height = \
+                            video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                                'vheight']
                         video_url_dict["video_url"] = video_url
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
@@ -320,7 +299,8 @@ class SchedulingFollow:
                         video_url_dict["video_height"] = 0
 
                 elif 'dash' in video_info['videoResource']:
-                    if "video_list" in video_info['videoResource']['dash'] and 'video_4' in video_info['videoResource']['dash']['video_list']:
+                    if "video_list" in video_info['videoResource']['dash'] and 'video_4' in \
+                            video_info['videoResource']['dash']['video_list']:
                         video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
                         audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -339,7 +319,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in video_info['videoResource']['dash']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in \
+                            video_info['videoResource']['dash']['video_list']:
                         video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
                         audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -358,7 +339,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in video_info['videoResource']['dash']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in \
+                            video_info['videoResource']['dash']['video_list']:
                         video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
                         audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -377,7 +359,8 @@ class SchedulingFollow:
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
                         video_url_dict["video_height"] = video_height
-                    elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in video_info['videoResource']['dash']['video_list']:
+                    elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in \
+                            video_info['videoResource']['dash']['video_list']:
                         video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
                         audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
                         if len(video_url) % 3 == 1:
@@ -403,8 +386,10 @@ class SchedulingFollow:
                             and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
                             and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
 
-                        video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['backup_url_1']
-                        audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1]['backup_url_1']
+                        video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                            'backup_url_1']
+                        audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                            'backup_url_1']
                         if len(video_url) % 3 == 1:
                             video_url += '=='
                         elif len(video_url) % 3 == 2:
@@ -415,8 +400,10 @@ class SchedulingFollow:
                             audio_url += '='
                         video_url = base64.b64decode(video_url).decode('utf8')
                         audio_url = base64.b64decode(audio_url).decode('utf8')
-                        video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['vwidth']
-                        video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1]['vheight']
+                        video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                            'vwidth']
+                        video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                            'vheight']
                         video_url_dict["video_url"] = video_url
                         video_url_dict["audio_url"] = audio_url
                         video_url_dict["video_width"] = video_width
@@ -555,7 +542,7 @@ class SchedulingFollow:
             Common.logger(log_type, crawler).error(f'get_video_url:{e}\n')
 
     @classmethod
-    def get_videolist(cls, log_type, crawler, task, our_uid, out_uid, oss_endpoint, env):
+    def get_videolist(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env):
         try:
             signature = cls.random_signature()
             while True:
@@ -567,6 +554,8 @@ class SchedulingFollow:
                     'maxBehotTime': '0',
                     'order': 'new',
                     'isHome': '0',
+                    # 'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
+                    # 'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
                     '_signature': signature,
                 }
                 headers = {
@@ -578,7 +567,8 @@ class SchedulingFollow:
                 # max_retries=3 重试3次
                 s.mount('http://', HTTPAdapter(max_retries=3))
                 s.mount('https://', HTTPAdapter(max_retries=3))
-                response = s.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+                response = s.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False,
+                                 timeout=5)
                 response.close()
                 cls.offset += 30
                 if response.status_code != 200:
@@ -589,7 +579,7 @@ class SchedulingFollow:
                     Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
                     cls.offset = 0
                     return
-                elif 'videoList' not in response.json()["data"]:
+                elif not response.json()["data"]['videoList']:
                     Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
                     cls.offset = 0
                     return
@@ -601,7 +591,7 @@ class SchedulingFollow:
                             video_title = 0
                         else:
                             video_title = videoList[i]['title'].strip().replace('手游', '') \
-                                .replace('/', '').replace('\/', '').replace('\n', '')
+                                .replace('/', '').replace('\n', '').replace('"', '').replace("'", '')
 
                         # video_id
                         if 'video_id' not in videoList[i]:
@@ -690,64 +680,44 @@ class SchedulingFollow:
                         elif 'url' in videoList[i]['video_detail_info']['detail_video_large_image']:
                             cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url']
                         else:
-                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
-
-                        min_publish_time = int(task["min_publish_time"])
-                        min_publish_day = int(task["min_publish_day"])
-                        min_publish_day = (date.today() + timedelta(days=-min_publish_day)).strftime("%Y-%m-%d")
-                        min_publish_day = int(time.mktime(time.strptime(min_publish_day, "%Y-%m-%d")))
-                        if min_publish_time > 0 and min_publish_day > 0:
-                            publish_time_rule = min_publish_time
-                        elif min_publish_time > 0:
-                            publish_time_rule = min_publish_time
-                        else:
-                            publish_time_rule = min_publish_day
-
-                        if gid == 0 or video_id == 0 or cover_url == 0:
-                            Common.logger(log_type, crawler).info('无效视频\n')
-                        elif is_top is True and int(publish_time) < publish_time_rule:
-                            Common.logger(log_type, crawler).info(f'置顶视频,且发布时间超过抓取时间\n')
-                        elif int(publish_time) < publish_time_rule:
-                            Common.logger(log_type, crawler).info(f'发布时间超过抓取时间\n')
-                            cls.offset = 0
-                            return
-                        else:
-                            video_url_dict = cls.get_video_url(log_type, crawler, gid)
-                            video_url = video_url_dict["video_url"]
-                            audio_url = video_url_dict["audio_url"]
-                            video_width = video_url_dict["video_width"]
-                            video_height = video_url_dict["video_height"]
-
-                            video_dict = {'video_title': video_title,
-                                          'video_id': video_id,
-                                          'gid': gid,
-                                          'play_cnt': play_cnt,
-                                          'comment_cnt': comment_cnt,
-                                          'like_cnt': like_cnt,
-                                          'share_cnt': share_cnt,
-                                          'video_width': video_width,
-                                          'video_height': video_height,
-                                          'duration': video_duration,
-                                          'publish_time_stamp': publish_time,
-                                          'publish_time_str': publish_time_str,
-                                          'is_top': is_top,
-                                          'user_name': user_name,
-                                          'user_id': user_id,
-                                          'avatar_url': avatar_url,
-                                          'cover_url': cover_url,
-                                          'audio_url': audio_url,
-                                          'video_url': video_url,
-                                          'session': signature}
-                            for k, v in video_dict.items():
-                                Common.logger(log_type, crawler).info(f"{k}:{v}")
-                            cls.download_publish(log_type=log_type,
-                                                 crawler=crawler,
-                                                 video_dict=video_dict,
-                                                 task=task,
-                                                 strategy=task["task_name"],
-                                                 our_uid=our_uid,
-                                                 oss_endpoint=oss_endpoint,
-                                                 env=env)
+                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0][
+                                'url']
+                        video_url_dict = cls.get_video_url(log_type, crawler, gid)
+                        video_url = video_url_dict["video_url"]
+                        audio_url = video_url_dict["audio_url"]
+                        video_width = video_url_dict["video_width"]
+                        video_height = video_url_dict["video_height"]
+
+                        video_dict = {'video_title': video_title,
+                                      'video_id': video_id,
+                                      'gid': gid,
+                                      'play_cnt': play_cnt,
+                                      'comment_cnt': comment_cnt,
+                                      'like_cnt': like_cnt,
+                                      'share_cnt': share_cnt,
+                                      'video_width': video_width,
+                                      'video_height': video_height,
+                                      'duration': video_duration,
+                                      'publish_time_stamp': publish_time,
+                                      'publish_time_str': publish_time_str,
+                                      'is_top': is_top,
+                                      'user_name': user_name,
+                                      'user_id': user_id,
+                                      'avatar_url': avatar_url,
+                                      'cover_url': cover_url,
+                                      'audio_url': audio_url,
+                                      'video_url': video_url,
+                                      'session': signature}
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy=strategy,
+                                             video_dict=video_dict,
+                                             task=task,
+                                             our_uid=our_uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_videolist:{e}\n")
 
@@ -761,19 +731,26 @@ class SchedulingFollow:
     @classmethod
     def download_publish(cls, log_type, crawler, strategy, video_dict, task, our_uid, oss_endpoint, env):
         try:
-            if cls.download_rule_scheduling(video_dict, task) is False:
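+            # title filter: fetch filter words from MySQL and skip any video whose title contains one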
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info('Title contains a filter word: {}\n', video_dict['video_title'])
+                    return
+            if download_rule(log_type, crawler, video_dict, task['rule_dict']) is False:
                 Common.logger(log_type, crawler).info('Does not meet the crawl rules\n')
-            elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
-                Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
+
             elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                 Common.logger(log_type, crawler).info('Video already downloaded\n')
             else:
                 # download the video
-                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
                 # download the audio
-                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'], url=video_dict['audio_url'])
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio',
+                                       title=video_dict['video_title'], url=video_dict['audio_url'])
                 # merge the audio and video tracks
-                Common.video_compose(log_type=log_type, crawler=crawler, video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+                Common.video_compose(log_type=log_type, crawler=crawler,
+                                     video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
                 md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                 if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                     # delete the video folder
@@ -787,7 +764,8 @@ class SchedulingFollow:
                 #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                 #     return
                 # download the cover image
-                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                 # save the video info to a txt file
                 Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
 
@@ -884,29 +862,26 @@ class SchedulingFollow:
 
     @classmethod
     def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
-        try:
-            user_list = cls.get_users(log_type=log_type,
-                                      crawler=crawler,
-                                      task=task,
-                                      env=env)
-            for user in user_list:
-                out_uid = user["out_uid"]
-                our_uid = int(user["our_uid"])
-                if our_uid == 0:
-                    pass
-                else:
-                    Common.logger(log_type, crawler).info(f"Start crawling profile videos of user {out_uid}\n")
-                    cls.get_videolist(log_type=log_type,
-                                      crawler=crawler,
-                                      task=task,
-                                      our_uid=our_uid,
-                                      out_uid=out_uid,
-                                      oss_endpoint=oss_endpoint,
-                                      env=env)
-                    cls.offset = 0
-                    time.sleep(1)
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
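+        # users come from the MySQL spider config; the per-user try/except keeps one bad user from aborting the run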
+        strategy = '定向抓取策略'
+        for user in user_list:
+            try:
+                spider_link = user["link"]
+                out_uid = spider_link.split('/')[-1]
+                user_name = user["nick_name"]
+                our_uid = user["uid"]
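+                # out_uid is the tail segment of the user's profile link; our_uid is the internal account id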
+                Common.logger(log_type, crawler).info(f"Start crawling profile videos of user {user_name}\n")
+                cls.get_videolist(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  task=task,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env)
+                cls.offset = 0
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
 
 
 if __name__ == '__main__':
@@ -916,5 +891,5 @@ if __name__ == '__main__':
     #                            env="dev",
     #                            machine="local")
 
-    print(SchedulingFollow.repeat_video("follow", "xigua", "v0201ag10000ce3jcjbc77u8jsplpgrg", "dev"))
+    print(ScheduleXiguaFollow.repeat_video("follow", "xigua", "v0201ag10000ce3jcjbc77u8jsplpgrg", "dev"))
     pass

+ 32 - 18
xigua/xigua_main/run_xigua_follow_scheduling.py

@@ -1,27 +1,28 @@
 # -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/17
+# @Author: lierqiang
+# @Time: 2023/4/21
 import argparse
 import os
 import sys
 
 sys.path.append(os.getcwd())
 from common.common import Common
-from xigua.xigua_follow.xigua_follow_scheduling import SchedulingFollow
+from xigua.xigua_follow.xigua_follow_scheduling import ScheduleXiguaFollow
+from common.public import task_fun
+
+
 # from common.feishu import Feishu
 
 
 def main(log_type, crawler, task, oss_endpoint, env):
-    task = dict(eval(task))
-    Common.logger(log_type, crawler).info(f"{type(task)}\n")
-    Common.logger(log_type, crawler).info(f"{task}\n")
+    task = task_fun(task)
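+    # task_fun is expected to parse the raw task payload into the task_dict / rule_dict structure used below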
     try:
         Common.logger(log_type, crawler).info('Start crawling Xigua Video targeted list\n')
-        SchedulingFollow.get_follow_videos(log_type=log_type,
-                                           crawler=crawler,
-                                           task=task,
-                                           oss_endpoint=oss_endpoint,
-                                           env=env)
+        ScheduleXiguaFollow.get_follow_videos(log_type=log_type,
+                                              crawler=crawler,
+                                              task=task,
+                                              oss_endpoint=oss_endpoint,
+                                              env=env)
         Common.del_logs(log_type, crawler)
         Common.logger(log_type, crawler).info('Crawl task finished\n')
     except Exception as e:
@@ -31,16 +32,29 @@ def main(log_type, crawler, task, oss_endpoint, env):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()  ## create the argument parser
-    parser.add_argument('--log_type', type=str)  ## add argument, specifying its type
-    parser.add_argument('--crawler')  ## add argument
-    parser.add_argument('--strategy')  ## add argument
+    parser.add_argument('--log_type', default='author')  ## add argument, specifying its type
+    parser.add_argument('--crawler', default='xigua')  ## add argument
+    parser.add_argument('--strategy', default='定向抓取')  ## add argument
     parser.add_argument('--task')  ## add argument
-    parser.add_argument('--oss_endpoint')  ## add argument
-    parser.add_argument('--env')  ## add argument
+    parser.add_argument('--oss_endpoint', default='outer')  ## add argument
+    parser.add_argument('--env', default='dev')  ## add argument
     # parser.add_argument('--machine')  ## add argument
     args = parser.parse_args()  ### assign arguments; they can also be passed from the terminal
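+    # NOTE: hard-coded test task below; it overrides the --task argument during local debugging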
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'xigua', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'playCnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans': {'min': 0, 'max': 0},
+                               'videos': {'min': 0, 'max': 0}, 'like': {'min': 0, 'max': 0},
+                               'videoWidth': {'min': 0, 'max': 0}, 'videoHeight': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_xigua_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 40, 'max': 0}, 'playCnt': {'min': 4000, 'max': 0},
+                      'period': {'min': 10, 'max': 0}, 'fans': {'min': 0, 'max': 0}, 'videos': {'min': 0, 'max': 0},
+                      'like': {'min': 0, 'max': 0}, 'videoWidth': {'min': 0, 'max': 0},
+                      'videoHeight': {'min': 0, 'max': 0}}}
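+    # example invocation (entry-point path assumed from the repo layout):
+    #   python xigua/xigua_main/run_xigua_follow_scheduling.py --log_type author --crawler xigua --env dev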
     main(log_type=args.log_type,
          crawler=args.crawler,
-         task=args.task,
+         task=task,
          oss_endpoint=args.oss_endpoint,
-         env=args.env)
+         env=args.env)

Some files in this diff are not shown because too many files have changed