
add 快手作者接入调度器 (integrate the Kuaishou author crawler into the scheduler)

lierqiang committed 2 years ago
Commit 53cda1f5dd

+ 248 - 449
kuaishou/kuaishou_follow/kuaishou_follow_scheduling.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# @Author: wangkun
+# @Author: lierqiang
 # @Time: 2023/2/24
 import os
 import random
@@ -13,69 +13,70 @@ import json
 
 import urllib3
 from requests.adapters import HTTPAdapter
-from selenium import webdriver
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
 
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.getuser import getUser
-from common.db import MysqlHelper
+# from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
 from common.publish import Publish
+from common.public import random_title, get_config_from_mysql
+from common.public import get_user_from_mysql
+from common.userAgent import get_random_user_agent
 
 
-class Follow:
+class KuaiShouFollowScheduling:
     platform = "快手"
     tag = "快手爬虫,定向爬虫策略"
 
     @classmethod
     def get_rule(cls, log_type, crawler, index):
         try:
-            while True:
-                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
-                if rule_sheet is None:
-                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
-                    time.sleep(10)
-                    continue
-                if index == 1:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
-                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
-                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
-                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
-                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
-                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
-                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
-                elif index == 2:
-                    rule_dict = {
-                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
-                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
-                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
-                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
-                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
-                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
-                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
-                    }
-                    # for k, v in rule_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    return rule_dict
+            rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
+            if index == 1:
+                rule_dict = {
+                    "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                    "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                    "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                    "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+                    "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                    "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
+                    "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
+                }
+                # for k, v in rule_dict.items():
+                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                return rule_dict
+            elif index == 2:
+                rule_dict = {
+                    "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
+                    "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
+                    "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
+                    "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
+                    "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
+                    "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
+                    "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
+                }
+                # for k, v in rule_dict.items():
+                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                return rule_dict
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
 
     @classmethod
     def download_rule(cls, video_dict, rule_dict):
-        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
-                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
-                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
-                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
-                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
-                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
-            return True
+        if video_dict['like_cnt'] >= rule_dict['like_cnt']['min'] \
+                and video_dict['publish_time'] >= rule_dict['publish_time']['min'] \
+                and video_dict['duration'] >= rule_dict['duration']['min'] \
+                and (video_dict['video_width'] >= rule_dict['width']['min']
+                     or video_dict['video_height'] >= rule_dict['height']['min']):
+            return True
         else:
             return False
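
For reference, the removed lines above eval'd rule strings assembled from the Feishu sheet (e.g. ">=1000"), while the scheduler path checks structured min/max thresholds. A minimal standalone sketch of the new check, with made-up sample values (the key names mirror the rule_dict used in this commit):

rule_dict = {'like_cnt': {'min': 100, 'max': 0}, 'duration': {'min': 40, 'max': 0},
             'width': {'min': 0, 'max': 0}, 'height': {'min': 720, 'max': 0},
             'publish_time': {'min': 0}}
video_dict = {'like_cnt': 1200, 'duration': 55, 'video_width': 720,
              'video_height': 1280, 'publish_time': 3}
ok = (video_dict['like_cnt'] >= rule_dict['like_cnt']['min']
      and video_dict['publish_time'] >= rule_dict['publish_time']['min']
      and video_dict['duration'] >= rule_dict['duration']['min']
      and (video_dict['video_width'] >= rule_dict['width']['min']
           or video_dict['video_height'] >= rule_dict['height']['min']))
print(ok)  # True: every sample value clears its 'min' threshold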
 
@@ -99,56 +100,31 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
-    # Generic fallback titles
-    @classmethod
-    def random_title(cls, log_type, crawler):
-        try:
-            while True:
-                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
-                if random_title_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{random_title_sheet} 10秒钟后重试")
-                    continue
-                random_title_list = []
-                for x in random_title_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            random_title_list.append(y)
-                return random.choice(random_title_list)
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
-
    # Fetch off-platform user info
     @classmethod
     def get_out_user_info(cls, log_type, crawler, out_uid):
         try:
             url = "https://www.kuaishou.com/graphql"
+
             payload = json.dumps({
                 "operationName": "visionProfile",
                 "variables": {
-                    "userId": str(out_uid)
+                    "userId": out_uid
                 },
                 "query": "query visionProfile($userId: String) {\n  visionProfile(userId: $userId) {\n    result\n    hostName\n    userProfile {\n      ownerCount {\n        fan\n        photo\n        follow\n        photo_public\n        __typename\n      }\n      profile {\n        gender\n        user_name\n        user_id\n        headurl\n        user_text\n        user_profile_bg_url\n        __typename\n      }\n      isFollowing\n      __typename\n    }\n    __typename\n  }\n}\n"
             })
             headers = {
-                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
-                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
-                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                'content-type': 'application/json',
-                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                'Cache-Control': 'no-cache',
-                'Connection': 'keep-alive',
+                'Accept': '*/*',
+                'Content-Type': 'application/json',
                 'Origin': 'https://www.kuaishou.com',
-                'Pragma': 'no-cache',
-                'Sec-Fetch-Dest': 'empty',
-                'Sec-Fetch-Mode': 'cors',
-                'Sec-Fetch-Site': 'same-origin',
-                'accept': '*/*',
-                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"'
+                'Cookie': 'did=web_5d4d0dff78b7819f8b015e7a81e2ca98; clientid=3; kpf=PC_WEB; kpn=KUAISHOU_VISION',
+                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
+                'Host': 'www.kuaishou.com',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
+                'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
+                'Accept-Encoding': 'gzip, deflate, br',
+                'Connection': 'keep-alive'
             }
             urllib3.disable_warnings()
             s = requests.session()
@@ -216,7 +192,7 @@ class Follow:
 
    # Fetch the user info list
     @classmethod
-    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
+    def get_user_list(cls, log_type, crawler, sheetid, env):
         try:
             while True:
                 user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
@@ -251,7 +227,7 @@ class Follow:
                                 "tag": cls.tag,
                             }
                             our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
-                                                              out_user_dict=out_user_dict, env=env, machine=machine)
+                                                                out_user_dict=out_user_dict, env=env)
                             our_uid = our_user_dict['our_uid']
                             our_user_link = our_user_dict['our_user_link']
                             Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
@@ -272,7 +248,7 @@ class Follow:
 
    # Clean up the video title
     @classmethod
-    def video_title(cls, log_type, crawler, title):
+    def video_title(cls, log_type, crawler, env, title):
         title_split1 = title.split(" #")
         if title_split1[0] != "":
             title1 = title_split1[0]
@@ -297,361 +273,186 @@ class Follow:
                           .replace("#", "").replace(".", "。").replace("\\", "") \
                           .replace(":", "").replace("*", "").replace("?", "") \
                           .replace("?", "").replace('"', "").replace("<", "") \
-                          .replace(">", "").replace("|", "").replace("@", "")[:40]
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
         if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
-            return cls.random_title(log_type, crawler)
+            return random_title(log_type, crawler, env, text='title')
         else:
             return video_title
 
     @classmethod
-    def get_cookie(cls, log_type, crawler, out_uid, machine):
+    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env, pcursor=""):
+        rule_dict_1 = task['rule_dict']
+        url = "https://www.kuaishou.com/graphql"
+        payload = json.dumps({
+            "operationName": "visionProfilePhotoList",
+            "variables": {
+                "userId": out_uid,
+                "pcursor": "",
+                "page": "profile"
+            },
+            "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+        })
+        headers = {
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://www.kuaishou.com',
+            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_44b06f828a7810da393092aa6bb8dde0; kpn=KUAISHOU_VISION',
+            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
+            'Host': 'www.kuaishou.com',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
+            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Connection': 'keep-alive'
+        }
         try:
-            # Capture request logs (performance logging)
-            ca = DesiredCapabilities.CHROME
-            ca["goog:loggingPrefs"] = {"performance": "ALL"}
-
-            # Run headless, without opening a browser window
-            chrome_options = webdriver.ChromeOptions()
-            chrome_options.add_argument("headless")
-            chrome_options.add_argument(
-                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-            chrome_options.add_argument("--no-sandbox")
-
-            # Initialize the webdriver
-            if machine == "aliyun":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
-            elif machine == "macpro":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v107/chromedriver'))
-            elif machine == "macair":
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/piaoquan/Downloads/chromedriver_v108/chromedriver'))
-            else:
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v109/chromedriver'))
-
-            driver.implicitly_wait(10)
-            # print('打开个人主页')
-            driver.get(f'https://www.kuaishou.com/profile/{out_uid}')
-            time.sleep(1)
-
-            # print('解析cookies')
-            logs = driver.get_log("performance")
-            # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
-            # print('退出浏览器')
-            driver.quit()
-            for line in logs:
-                msg = json.loads(line['message'])
-                # Common.logger(log_type, crawler).info(f"{msg}\n\n")
-                if 'message' not in msg:
-                    pass
-                elif 'params' not in msg['message']:
-                    pass
-                elif 'headers' not in msg['message']['params']:
-                    pass
-                elif 'Cookie' not in msg['message']['params']['headers']:
-                    pass
-                elif msg['message']['params']['headers']['Host'] != 'www.kuaishou.com':
-                    pass
-                else:
-                    cookie = msg['message']['params']['headers']['Cookie']
-                    # Common.logger(log_type, crawler).info(f"{cookie}")
-                    return cookie
+            response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
+                                     verify=False, timeout=10)
+            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
         except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_cookie:{e}\n")
-
-    @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        try:
-            download_cnt_1, download_cnt_2 = 0, 0
-            pcursor = ""
+            resp_text = response.text if 'response' in locals() else ''
+            Common.logger(log_type, crawler).error(f"get_videoList:{e}, response:{resp_text}")
+            return
+        if not feeds:
+            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+            return
+        pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
+        for i in range(len(feeds)):
+            # video_title
+            if 'caption' not in feeds[i]['photo']:
+                video_title = random_title(log_type, crawler, env, text='title')
+            elif feeds[i]['photo']['caption'].strip() == "":
+                video_title = random_title(log_type, crawler, env, text='title')
+            else:
+                video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
+
+            if 'videoResource' not in feeds[i]['photo'] \
+                    and 'manifest' not in feeds[i]['photo'] \
+                    and 'manifestH265' not in feeds[i]['photo']:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                break
+            videoResource = feeds[i]['photo']['videoResource']
+
+            if 'h264' not in videoResource and 'hevc' not in videoResource:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                break
+
+            # video_id
+            if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                video_id = videoResource['h264']['videoId']
+            elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                video_id = videoResource['hevc']['videoId']
+            else:
+                video_id = ""
 
-            while True:
-                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
-                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
-                if rule_dict_1 is None or rule_dict_2 is None:
-                    Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
-                    time.sleep(10)
-                else:
-                    break
+            # play_cnt
+            if 'viewCount' not in feeds[i]['photo']:
+                play_cnt = 0
+            else:
+                play_cnt = int(feeds[i]['photo']['viewCount'])
 
-            while True:
-                if download_cnt_1 >= int(
-                        rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[
-                            -1]) and download_cnt_2 >= int(
-                        rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                    Common.logger(log_type, crawler).info(
-                        f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
-                    return
+            # like_cnt
+            if 'realLikeCount' not in feeds[i]['photo']:
+                like_cnt = 0
+            else:
+                like_cnt = feeds[i]['photo']['realLikeCount']
 
-                url = "https://www.kuaishou.com/graphql"
-                payload = json.dumps({
-                    "operationName": "visionProfilePhotoList",
-                    "variables": {
-                        "userId": out_uid,
-                        "pcursor": pcursor,
-                        "page": "profile"
-                    },
-                    "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
-                })
-                # get_cookie = cls.get_cookie(log_type, crawler, out_uid, machine)
-                # if get_cookie is None:
-                #     cookie = 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION'
-                # else:
-                #     cookie = get_cookie
-                # Common.logger(log_type, crawler).info(f"cookie:{cookie}")
-                headers = {
-                    # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
-                    # 'Cookie': cookie,
-                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
-                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                    'content-type': 'application/json',
-                    # 'accept': '*/*',
-                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                    # 'Cache-Control': 'no-cache',
-                    # 'Connection': 'keep-alive',
-                    # 'Origin': 'https://www.kuaishou.com',
-                    # 'Pragma': 'no-cache',
-                    # 'Sec-Fetch-Dest': 'empty',
-                    # 'Sec-Fetch-Mode': 'cors',
-                    # 'Sec-Fetch-Site': 'same-origin',
-                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                    # 'sec-ch-ua-mobile': '?0',
-                    # 'sec-ch-ua-platform': '"macOS"'
-                }
-                urllib3.disable_warnings()
-                s = requests.session()
-                # max_retries=3: retry up to 3 times
-                s.mount('http://', HTTPAdapter(max_retries=3))
-                s.mount('https://', HTTPAdapter(max_retries=3))
-                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
-                                  timeout=5)
-                response.close()
-                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
-                if response.status_code != 200:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
-                    return
-                elif 'data' not in response.json():
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
-                    return
-                elif 'visionProfilePhotoList' not in response.json()['data']:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
-                    return
-                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-                    Common.logger(log_type, crawler).warning(
-                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
-                    return
-                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
-                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-                    return
-                else:
-                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
-                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
-                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
-                    for i in range(len(feeds)):
-                        if 'photo' not in feeds[i]:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
-                            break
-
-                        # video_title
-                        if 'caption' not in feeds[i]['photo']:
-                            video_title = cls.random_title(log_type, crawler)
-                        elif feeds[i]['photo']['caption'].strip() == "":
-                            video_title = cls.random_title(log_type, crawler)
-                        else:
-                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
-
-                        if 'videoResource' not in feeds[i]['photo'] \
-                                and 'manifest' not in feeds[i]['photo'] \
-                                and 'manifestH265' not in feeds[i]['photo']:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                            break
-                        videoResource = feeds[i]['photo']['videoResource']
-
-                        if 'h264' not in videoResource and 'hevc' not in videoResource:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                            break
-
-                        # video_id
-                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                            video_id = videoResource['h264']['videoId']
-                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                            video_id = videoResource['hevc']['videoId']
-                        else:
-                            video_id = ""
+            # publish_time
+            if 'timestamp' not in feeds[i]['photo']:
+                publish_time_stamp = 0
+                publish_time_str = ''
+                publish_time = 0
+            else:
+                publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
-                        # play_cnt
-                        if 'viewCount' not in feeds[i]['photo']:
-                            play_cnt = 0
-                        else:
-                            play_cnt = int(feeds[i]['photo']['viewCount'])
+            # duration
+            if 'duration' not in feeds[i]['photo']:
+                duration = 0
+            else:
+                duration = int(int(feeds[i]['photo']['duration']) / 1000)
 
-                        # like_cnt
-                        if 'realLikeCount' not in feeds[i]['photo']:
-                            like_cnt = 0
-                        else:
-                            like_cnt = feeds[i]['photo']['realLikeCount']
+            # video_width / video_height
+            mapping = {}
+            for item in ['width', 'height']:
+                try:
+                    val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                except (KeyError, IndexError):
+                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                mapping[item] = val
+            video_width = int(mapping['width']) if mapping['width'] else 0
+            video_height = int(mapping['height']) if mapping['height'] else 0
+            # cover_url
+            if 'coverUrl' not in feeds[i]['photo']:
+                cover_url = ""
+            else:
+                cover_url = feeds[i]['photo']['coverUrl']
+
+            # user_name / avatar_url
+            user_name = feeds[i]['author']['name']
+            avatar_url = feeds[i]['author']['headerUrl']
+
+            video_url = feeds[i]['photo']['photoUrl']
+            video_dict = {'video_title': video_title,
+                          'video_id': video_id,
+                          'play_cnt': play_cnt,
+                          'comment_cnt': 0,
+                          'like_cnt': like_cnt,
+                          'share_cnt': 0,
+                          'video_width': video_width,
+                          'video_height': video_height,
+                          'duration': duration,
+                          'publish_time': publish_time,
+                          'publish_time_stamp': publish_time_stamp,
+                          'publish_time_str': publish_time_str,
+                          'user_name': user_name,
+                          'user_id': out_uid,
+                          'avatar_url': avatar_url,
+                          'cover_url': cover_url,
+                          'video_url': video_url,
+                          'session': f"kuaishou{int(time.time())}"}
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            rule_1 = cls.download_rule(video_dict, rule_dict_1)
+            if rule_1 is True:
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     strategy=strategy,
+                                     video_dict=video_dict,
+                                     rule_dict=rule_dict_1,
+                                     our_uid=our_uid,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env,
+                                     )
 
-                        # publish_time
-                        if 'timestamp' not in feeds[i]['photo']:
-                            publish_time_stamp = 0
-                            publish_time_str = ''
-                            publish_time = 0
-                        else:
-                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
-                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
-                        # duration
-                        if 'duration' not in feeds[i]['photo']:
-                            duration = 0
-                        else:
-                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
-
-                        # video_width / video_height / video_url
-                        mapping = {}
-                        for item in ['width', 'height', 'url']:
-                            try:
-                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                            except Exception:
-                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                            except:
-                                val = ''
-                            mapping[item] = val
-                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
-                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
-                        video_url = mapping['url']
-
-                        # cover_url
-                        if 'coverUrl' not in feeds[i]['photo']:
-                            cover_url = ""
-                        else:
-                            cover_url = feeds[i]['photo']['coverUrl']
-
-                        # user_name / avatar_url
-                        try:
-                            user_name = feeds[i]['author']['name']
-                            avatar_url = feeds[i]['author']['headerUrl']
-                        except Exception:
-                            user_name = ''
-                            avatar_url = ''
-
-                        video_dict = {'video_title': video_title,
-                                      'video_id': video_id,
-                                      'play_cnt': play_cnt,
-                                      'comment_cnt': 0,
-                                      'like_cnt': like_cnt,
-                                      'share_cnt': 0,
-                                      'video_width': video_width,
-                                      'video_height': video_height,
-                                      'duration': duration,
-                                      'publish_time': publish_time,
-                                      'publish_time_stamp': publish_time_stamp,
-                                      'publish_time_str': publish_time_str,
-                                      'user_name': user_name,
-                                      'user_id': out_uid,
-                                      'avatar_url': avatar_url,
-                                      'cover_url': cover_url,
-                                      'video_url': video_url,
-                                      'session': f"kuaishou{int(time.time())}"}
-
-                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
-                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
-
-                        if video_title == "" or video_url == "":
-                            Common.logger(log_type, crawler).info("无效视频\n")
-                            break
-                        elif rule_1 is True:
-                            if download_cnt_1 < int(
-                                    rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_1,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_1 += 1
-                        elif rule_2 is True:
-                            if download_cnt_2 < int(
-                                    rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_2,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_2 += 1
-                        else:
-                            Common.logger(log_type, crawler).info("不满足下载规则\n")
-                            # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+            else:
+                Common.logger(log_type, crawler).info("不满足下载规则\n")
 
-                    if pcursor == "no_more":
-                        Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
-                        return
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+            # if pcursor == "no_more":
+            #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
+            #     return
+            # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env,
+            #               pcursor=pcursor)
+            # time.sleep(random.randint(1, 3))
 
     @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
+    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
         sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
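
Side note: the f-string SQL above is fragile against titles that contain a double quote (cls.video_title strips them, but titles coming straight from random_title may bypass that) and against injection in general. MysqlHelper's internals are not shown in this diff, so purely as a hedged sketch, the same dedup query with pymysql-style parameter binding would look like this (connection details and sample values are placeholders):

import pymysql

conn = pymysql.connect(host='127.0.0.1', user='user', password='pass', database='crawler')
platform, video_id, video_title, publish_time = "快手", "abc123", "示例标题", "2023-04-21 00:00:00"
with conn.cursor() as cur:
    cur.execute(
        "select * from crawler_video "
        "where (platform=%s and out_video_id=%s) "
        "   or (platform=%s and video_title=%s and publish_time=%s)",
        (platform, video_id, platform, video_title, publish_time))
    repeat_video = cur.fetchall()
print(len(repeat_video))  # same count the method returns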
 
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
         try:
-            download_finished = False
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+                    return
             if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
-                                video_dict['publish_time_str'], env, machine) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
+                                video_dict['publish_time_str'], env) != 0:
                 Common.logger(log_type, crawler).info('视频已下载\n')
-            elif any(word if word in video_dict['video_title'] else False for word in
-                     cls.filter_words(log_type, crawler)) is True:
-                Common.logger(log_type, crawler).info('标题已中过滤词\n')
             else:
                # Download the video
                 Common.download_method(log_type=log_type, crawler=crawler, text='video',
@@ -693,7 +494,7 @@ class Follow:
                     Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Delete the video folder
                     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                    return download_finished
+                    return
 
                # Save the video info to the database
                 insert_sql = f""" insert into crawler_video(video_id,
@@ -727,7 +528,7 @@ class Follow:
                                                         {int(video_dict['video_width'])},
                                                         {int(video_dict['video_height'])}) """
                 Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                 Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
 
                # Write the video info to Feishu
@@ -755,43 +556,41 @@ class Follow:
                 Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                 Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                 download_finished = True
-            return download_finished
+            return
         except Exception as e:
             Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
 
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
+    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        strategy = '定向抓取策略'
         for user in user_list:
-            out_uid = user["out_uid"]
-            user_name = user["user_name"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              strategy=strategy,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env,
-                              machine=machine)
-            sleep_time = 120
-            Common.logger(log_type, crawler).info(f"休眠{sleep_time}秒\n")
-            time.sleep(sleep_time)
+            try:
+                spider_link = user["link"]
+                out_uid = spider_link.split('/')[-1]
+                user_name = user["nick_name"]
+                our_uid = user["uid"]
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  task=task,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+                continue
 
 
 if __name__ == "__main__":
-    # Follow.get_videoList(log_type="follow",
-    #                      crawler="kuaishou",
-    #                      strategy="定向爬虫策略",
-    #                      our_uid="6282431",
-    #                      out_uid="3xws7ydsnmp5mgq",
-    #                      oss_endpoint="out",
-    #                      env="dev",
-    #                      machine="local")
-    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
+    # get_videoList reads task['rule_dict'], so pass a minimal rule dict
+    # (min/max thresholds, mirroring the runner's rule_dict) instead of "".
+    task = {'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                          'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                          'height': {'min': 0, 'max': 0}, 'publish_time': {'min': 0}}}
+    KuaiShouFollowScheduling.get_follow_videos(
+        log_type="follow",
+        crawler="kuaishou",
+        task=task,
+        oss_endpoint="out",
+        env="dev",
+    )
+
+    # print(KuaiShouFollowScheduling.get_out_user_info("follow", "kuaishou", "3xnk3wbm3vfiha6"))
     # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3xvp5w6twj77xeq", "local"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3xgh4ja9be3wcaw", "local"))
-    print(Follow.get_cookie("cookies", "kuaishou", "3x5wgjhfc7tx8ue", "local"))
-    pass

+ 40 - 32
kuaishou/kuaishou_main/run_kuaishou_follow_scheduling.py

@@ -1,49 +1,57 @@
 # -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/27
+# @Author: lierqiang
+# @Time: 2023/4/21
 import argparse
 import os
 import sys
-# import time
 
 sys.path.append(os.getcwd())
 from common.common import Common
-# from common.feishu import Feishu
-from kuaishou.kuaishou_follow.kuaishou_follow_scheduling import Follow
+from kuaishou.kuaishou_follow.kuaishou_follow_scheduling import KuaiShouFollowScheduling
+from common.public import task_fun
 
 
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    while True:
-        try:
-            Common.logger(log_type, crawler).info('开始抓取 快手 定向榜\n')
-            Follow.get_follow_videos(log_type=log_type,
-                                     crawler=crawler,
-                                     strategy=strategy,
-                                     oss_endpoint=oss_endpoint,
-                                     env=env,
-                                     machine=machine)
-            Common.del_logs(log_type, crawler)
-            Common.logger(log_type, crawler).info('抓取完一轮\n')
-            break
-        except Exception as e:
-            Common.logger(log_type, crawler).info(f"快手定向榜异常,触发报警:{e}\n")
-            # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
+def main(log_type, crawler, task, oss_endpoint, env):
+    # task = task_fun(task)
+    try:
+        Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 定向榜\n')
+        KuaiShouFollowScheduling.get_follow_videos(log_type=log_type,
+                                                   crawler=crawler,
+                                                   task=task,
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取任务结束\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).info(f"{crawler}视频异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"{e}")
 
 
 if __name__ == "__main__":
    parser = argparse.ArgumentParser()  ## create the argument parser
-    parser.add_argument('--log_type', type=str)  ## add argument, declaring its type
-    parser.add_argument('--crawler')  ## add argument
-    parser.add_argument('--strategy')  ## add argument
-    parser.add_argument('--our_uid')  ## add argument
-    parser.add_argument('--oss_endpoint')  ## add argument
-    parser.add_argument('--env')  ## add argument
-    parser.add_argument('--machine')  ## add argument
+    parser.add_argument('--log_type', default='author')  ## add argument, with a default
+    parser.add_argument('--crawler', default='kuaishou')  ## add argument
+    parser.add_argument('--strategy', default='定向抓取')  ## add argument
+    parser.add_argument('--task')  ## add argument
+    parser.add_argument('--oss_endpoint', default='outer')  ## add argument
+    parser.add_argument('--env', default='dev')  ## add argument
+    # parser.add_argument('--machine')  ## add argument
    args = parser.parse_args()  ### assign arguments; they can also be passed on the command line
-    # print(args)
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'kuaishou', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'play_cnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                               'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                               'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_dy_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                      'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0}, 'videos_cnt': {'min': 0, 'max': 0},
+                      'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                      'height': {'min': 0, 'max': 0}, 'publish_time': {'min': 0}}}
     main(log_type=args.log_type,
          crawler=args.crawler,
-         strategy=args.strategy,
+         task=task,
          oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)
+         env=args.env)
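
With the defaults above, a typical local run of the scheduler entrypoint (assuming the repository root as the working directory, since both scripts call sys.path.append(os.getcwd())) would be:

python kuaishou/kuaishou_main/run_kuaishou_follow_scheduling.py --log_type author --crawler kuaishou --env dev

Note that --task is parsed but currently unused: main() receives the hardcoded task dict defined above it.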