wangkun 2 years ago
parent
commit
44a0c7b2f5

+ 138 - 45
kuaishou/kuaishou_follow/kuaishou_follow.py

@@ -11,6 +11,9 @@ import json
 
 import urllib3
 from requests.adapters import HTTPAdapter
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
 
 sys.path.append(os.getcwd())
 from common.common import Common
@@ -64,11 +67,11 @@ class Follow:
 
     @classmethod
     def download_rule(cls, video_dict, rule_dict):
-        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True\
-                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True\
-                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True\
-                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True\
-                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True\
+        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
+                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
+                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
+                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
+                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
                 and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
             return True
         else:
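
Note on this hunk: the reflow keeps the eval() chain, so strings such as "34005>=0" are still executed as code, and a malformed rule value fails (or evaluates) arbitrarily. A minimal eval-free sketch, assuming rule values keep the "<operator><number>" shape used in this file; compare_rule is a hypothetical helper, not part of this commit:

import operator
import re

# Hypothetical replacement for the eval() chain in download_rule above.
# Assumes rule strings look like ">=0", "<=60" or ">100".
_OPS = {">=": operator.ge, "<=": operator.le, "==": operator.eq,
        "!=": operator.ne, ">": operator.gt, "<": operator.lt}

def compare_rule(value, rule):
    match = re.fullmatch(r"(>=|<=|==|!=|>|<)\s*(\d+)", rule.strip())
    if match is None:
        raise ValueError(f"unrecognized rule: {rule!r}")
    op, threshold = match.groups()
    return _OPS[op](int(value), int(threshold))

# e.g. compare_rule(34005, ">=0") -> True; compare_rule(45, "<=60") -> True
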
@@ -150,7 +153,8 @@ class Follow:
             # max_retries=3 重试3次
             s.mount('http://', HTTPAdapter(max_retries=3))
             s.mount('https://', HTTPAdapter(max_retries=3))
-            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                              timeout=5)
             response.close()
             # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
             if response.status_code != 200:
@@ -163,7 +167,8 @@ class Follow:
                 Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                 return
             elif 'userProfile' not in response.json()['data']['visionProfile']:
-                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']['userProfile']}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"get_out_user_info_response:{response.json()['data']['visionProfile']['userProfile']}\n")
                 return
             else:
                 userProfile = response.json()['data']['visionProfile']['userProfile']
@@ -218,7 +223,7 @@ class Follow:
                     continue
                 our_user_list = []
                 for i in range(1, len(user_sheet)):
-                # for i in range(1, 2):
+                    # for i in range(1, 2):
                     out_uid = user_sheet[i][2]
                     user_name = user_sheet[i][3]
                     our_uid = user_sheet[i][6]
@@ -312,8 +317,12 @@ class Follow:
                     break
 
             while True:
-                if download_cnt_1 >=int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]) and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                    Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
+                if download_cnt_1 >= int(
+                        rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[
+                            -1]) and download_cnt_2 >= int(
+                        rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
+                    Common.logger(log_type, crawler).info(
+                        f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
                     return
 
                 url = "https://www.kuaishou.com/graphql"
@@ -326,9 +335,14 @@ class Follow:
                     },
                     "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
                 })
+                get_cookie = cls.get_cookie(log_type, crawler, out_uid, machine)
+                if get_cookie is None:
+                    cookie = 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION'
+                else:
+                    cookie = get_cookie
                 headers = {
                     # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
-                    'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
+                    'Cookie': cookie,
                     'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                     'content-type': 'application/json',
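
Two observations on the cookie fallback added here: cls.get_cookie launches a headless Chrome for every pagination request, and the None check can be folded into `or` (which also covers an empty-string result). A sketch of a per-user cache under those assumptions; cached_cookie and the fetch callable are hypothetical:

# Hypothetical per-user cookie cache: the headless-Chrome sniff runs at most
# once per out_uid instead of once per page of the while-True loop.
_cookie_cache = {}

def cached_cookie(out_uid, fetch, default_cookie):
    # fetch: zero-arg callable wrapping cls.get_cookie(...) for this out_uid
    if out_uid not in _cookie_cache:
        _cookie_cache[out_uid] = fetch() or default_cookie
    return _cookie_cache[out_uid]
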
@@ -350,7 +364,8 @@ class Follow:
                 # max_retries=3 重试3次
                 s.mount('http://', HTTPAdapter(max_retries=3))
                 s.mount('https://', HTTPAdapter(max_retries=3))
-                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                                  timeout=5)
                 response.close()
                 # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
                 if response.status_code != 200:
@@ -363,7 +378,8 @@ class Follow:
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                     return
                 elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
+                    Common.logger(log_type, crawler).warning(
+                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                     return
                 elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                     Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
@@ -386,8 +402,8 @@ class Follow:
                             video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
 
                         if 'videoResource' not in feeds[i]['photo'] \
-                                and 'manifest' not in feeds[i]['photo']\
-                                and 'manifestH265'not in feeds[i]['photo']:
+                                and 'manifest' not in feeds[i]['photo'] \
+                                and 'manifestH265' not in feeds[i]['photo']:
                             Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                             break
                         videoResource = feeds[i]['photo']['videoResource']
@@ -422,15 +438,15 @@ class Follow:
                             publish_time_str = ''
                             publish_time = 0
                         else:
-                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp'])/1000)
+                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                             publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600*24))
+                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
                         # duration
                         if 'duration' not in feeds[i]['photo']:
                             duration = 0
                         else:
-                            duration = int(int(feeds[i]['photo']['duration'])/1000)
+                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
 
                         # video_width / video_height / video_url
                         mapping = {}
@@ -483,28 +499,43 @@ class Follow:
                         Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                         Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
 
-                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                         Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
 
                         rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                        Common.logger(log_type, crawler).info(
+                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                         Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
 
                         if video_title == "" or video_url == "":
                             Common.logger(log_type, crawler).info("无效视频\n")
                             break
                         elif rule_1 is True:
-                            if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
+                            if download_cnt_1 < int(
+                                    rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
+                                                                                                                  "")[
+                                        -1]):
                                 download_finished = cls.download_publish(log_type=log_type,
                                                                          crawler=crawler,
                                                                          strategy=strategy,
@@ -517,7 +548,10 @@ class Follow:
                                 if download_finished is True:
                                     download_cnt_1 += 1
                         elif rule_2 is True:
-                            if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
+                            if download_cnt_2 < int(
+                                    rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
+                                                                                                                  "")[
+                                        -1]):
                                 download_finished = cls.download_publish(log_type=log_type,
                                                                          crawler=crawler,
                                                                          strategy=strategy,
@@ -549,23 +583,28 @@ class Follow:
     def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
         try:
             download_finished = False
-            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'], video_dict['publish_time_str'], env, machine) != 0:
+            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
+                                video_dict['publish_time_str'], env, machine) != 0:
                 Common.logger(log_type, crawler).info('视频已下载\n')
             elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
                 Common.logger(log_type, crawler).info('视频已下载\n')
-            elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
+            elif any(word if word in video_dict['video_title'] else False for word in
+                     cls.filter_words(log_type, crawler)) is True:
                 Common.logger(log_type, crawler).info('标题已中过滤词\n')
             else:
                 # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
-                ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                            f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                 if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                     # 删除视频文件夹
                     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                     return download_finished
                 # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                 # 保存视频信息至txt
                 Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
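
Aside on the filter-word check in this hunk: any(word if word in ... else False ...) is the long form of a plain membership test. An equivalent sketch (equivalent up to empty-string filter words, which the long form skips and the short form matches):

# Hypothetical simplification of the filter-word test above.
def title_hits_filter(video_title, filter_words):
    return any(word in video_title for word in filter_words)

# e.g. title_hits_filter("风景视频", ["风景", "广告"]) -> True
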
 
@@ -671,11 +710,65 @@ class Follow:
                               machine=machine)
             time.sleep(3)
 
+    @classmethod
+    def get_cookie(cls, log_type, crawler, out_uid, machine):
+        try:
+            # 打印请求配置
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # 不打开浏览器运行
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(
+                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # driver初始化
+            if machine == "aliyun":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+            elif machine == "macpro":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v107/chromedriver'))
+            elif machine == "macair":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/piaoquan/Downloads/chromedriver_v108/chromedriver'))
+            else:
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v109/chromedriver'))
+
+            driver.implicitly_wait(10)
+            # print('打开个人主页')
+            driver.get(f'https://www.kuaishou.com/profile/{out_uid}')
+            time.sleep(1)
+
+            # print('解析cookies')
+            logs = driver.get_log("performance")
+            # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
+            # print('退出浏览器')
+            driver.quit()
+            for line in logs:
+                msg = json.loads(line['message'])
+                # Common.logger(log_type, crawler).info(f"{msg}\n\n")
+                if 'message' not in msg:
+                    pass
+                elif 'params' not in msg['message']:
+                    pass
+                elif 'headers' not in msg['message']['params']:
+                    pass
+                elif 'Cookie' not in msg['message']['params']['headers']:
+                    pass
+                elif msg['message']['params']['headers']['Host'] != 'www.kuaishou.com':
+                    pass
+                else:
+                    cookie = msg['message']['params']['headers']['Cookie']
+                    # Common.logger(log_type, crawler).info(f"{cookie}")
+                    return cookie
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_cookie:{e}\n")
+
 
 if __name__ == "__main__":
-    # print(Follow.filter_words("follow", "kuaishou"))
-    # print(Follow.random_title("follow", "kuaishou"))
-    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
     # Follow.get_videoList(log_type="follow",
     #                      crawler="kuaishou",
     #                      strategy="定向爬虫策略",
@@ -684,9 +777,9 @@ if __name__ == "__main__":
     #                      oss_endpoint="out",
     #                      env="dev",
     #                      machine="local")
-    # Follow.get_rule("follow", "kuaishou", 1)
-    # Follow.get_rule("follow", "kuaishou", 2)
-    print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
-    print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
-
+    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
+    # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3xvp5w6twj77xeq", "local"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3xgh4ja9be3wcaw", "local"))
+    print(Follow.get_cookie("cookies", "kuaishou", "3x5wgjhfc7tx8ue", "local"))
     pass
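
Compatibility note on get_cookie above: it passes desired_capabilities= to webdriver.Chrome, a keyword Selenium 4 deprecated and later removed, so on current drivers the same performance-log setup goes through ChromeOptions. A minimal sketch under that assumption (chromedriver resolved from PATH / Selenium Manager; the profile URL is a placeholder):

import json
import time
from selenium import webdriver

# Sketch for Selenium 4+: enable CDP performance logging via ChromeOptions;
# the desired_capabilities= keyword used above no longer exists there.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.set_capability("goog:loggingPrefs", {"performance": "ALL"})

driver = webdriver.Chrome(options=chrome_options)  # driver found via PATH
driver.get("https://www.kuaishou.com/profile/placeholder_uid")  # placeholder uid
time.sleep(1)  # let the page fire its first requests
logs = driver.get_log("performance")
driver.quit()

# Same extraction idea as get_cookie above: walk the CDP events and pull the
# Cookie header from requests whose Host is www.kuaishou.com.
for line in logs:
    msg = json.loads(line["message"])
    headers = msg.get("message", {}).get("params", {}).get("headers", {})
    if headers.get("Host") == "www.kuaishou.com" and "Cookie" in headers:
        print(headers["Cookie"])
        break
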

+ 3 - 0
scheduling/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/2

+ 103 - 0
scheduling/crawler_scheduling.py

@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/2
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.db import MysqlHelper
+
+
+class Scheduling:
+    # 任务列表
+    task_list = []
+
+    # 读取任务表
+    @classmethod
+    def get_task(cls, log_type, crawler, env, machine):
+        get_sql = """ select * from crawler_task_1 """
+        all_task_list = MysqlHelper.get_values(log_type=log_type, crawler=crawler, sql=get_sql, env=env, machine=machine)
+        pre_task_list = []
+        for task in all_task_list:
+            task_id = task[0]
+            task_name = task[1]
+            source = task[2]
+            next_time = task[3]
+            interval_piaoquan = task[4]
+            spider_rule = task[5]
+            task_type = task[6]
+            spider_link = task[7]
+            spider_name = task[8]
+            min_publish_time = task[9]
+            min_publish_day = task[10]
+            media_id = task[11]
+            applets_status = task[12]
+            app_status = task[13]
+            user_tag = task[14]
+            user_content_tag = task[15]
+            machine = task[16]
+            insert_time = task[17]
+            update_time = task[18]
+            if next_time >= int(time.time()):
+                task_dict = {
+                    "task_id": task_id,
+                    "task_name": task_name,
+                    "source": source,
+                    "next_time": next_time,
+                    "interval_piaoquan": interval_piaoquan,
+                    "spider_rule": spider_rule,
+                    "task_type": task_type,
+                    "spider_link": spider_link,
+                    "spider_name": spider_name,
+                    "min_publish_time": min_publish_time,
+                    "min_publish_day": min_publish_day,
+                    "media_id": media_id,
+                    "applets_status": applets_status,
+                    "app_status": app_status,
+                    "user_tag": user_tag,
+                    "user_content_tag": user_content_tag,
+                    "machine": machine,
+                    "insert_time": insert_time,
+                    "update_time": update_time,
+                }
+                pre_task_list.append(task_dict)
+
+        return pre_task_list
+
+
+    # 组装任务
+    @classmethod
+    def update_task(cls, log_type, crawler, env, machine):
+        pre_task_list = cls.get_task(log_type=log_type, crawler=crawler, env=env, machine=machine)
+        if len(pre_task_list) == 0:
+            Common.logger(log_type, crawler).info("暂无新任务\n")
+        else:
+            for i in range(len(pre_task_list)):
+                task_id = pre_task_list[i]["task_id"]
+                task_name = pre_task_list[i]["task_name"]
+                next_time = pre_task_list[i]["next_time"]
+                interval_piaoquan = pre_task_list[i]["interval_piaoquan"]
+                spider_rule = pre_task_list[i]["spider_rule"]
+                print(f"task_id:{task_id}")
+                print(f"task_name:{task_name}")
+                print(f"next_time:{next_time}")
+                print(f"interval_piaoquan:{interval_piaoquan}")
+                print(f"spider_rule:{spider_rule}\n")
+
+    # 资源分配
+    @classmethod
+    def resource_allocation(cls, log_type, crawler, env, machine):
+        pass
+
+    # 写入任务队列
+    @classmethod
+    def write_to_queue(cls):
+        pass
+
+
+if __name__ == "__main__":
+    # task_list = Scheduling.get_task("Scheduling", "scheduling", "dev", "local")
+    # print(task_list)
+    Scheduling.update_task("Scheduling", "scheduling", "dev", "local")
+    pass
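
One thing worth flagging in the new get_task(): the filter keeps rows with next_time >= int(time.time()), i.e. tasks whose next run is still in the future, whereas a scheduler usually dispatches rows whose next_time has already arrived. A sketch that isolates the comparison so either intent is easy to test; is_due is hypothetical:

import time

# Hypothetical due-check: most schedulers run a task once next_time has
# passed (next_time <= now). The committed code keeps next_time >= now,
# which selects future tasks instead; pick whichever matches the intent.
def is_due(next_time, now=None):
    now = int(time.time()) if now is None else now
    return next_time <= now

# e.g. pre_task_list = [t for t in all_task_list if is_due(t[3])]  # t[3] = next_time
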

+ 1 - 1
weixinzhishu/weixinzhishu_main/get_weixinzhishu.py

@@ -240,6 +240,6 @@ class Weixinzhishu:
 
 
 if __name__ == "__main__":
-    Weixinzhishu.get_score_test('weixin', 'weixinzhishu', 1 , "原因")
+    Weixinzhishu.get_score_test('weixin', 'weixinzhishu', 1 , "必要")
 
     pass

+ 60 - 63
weixinzhishu/weixinzhishu_main/weixinzhishu_test.py

@@ -74,75 +74,72 @@ class Test:
                     Common.logger(log_type, crawler).info(
                         f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time())))} auth 过期,休眠 10 秒,重新获取")
                     time.sleep(10)
-                else:
-                    search_key = wechat_key[0]
-                    openid = wechat_key[-1]
-                    break
+                    continue
 
-            url = "https://search.weixin.qq.com/cgi-bin/wxaweb/wxindex"
-            payload = json.dumps({
-                "openid": openid,
-                "search_key": search_key,
-                "cgi_name": "GetDefaultIndex",
-                "start_ymd": start_ymd,
-                "end_ymd": end_ymd,
-                "query": word_list[i]
-            })
-            headers = {
-                'Host': 'search.weixin.qq.com',
-                'content-type': 'application/json',
-                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.32(0x1800202a) NetType/WIFI Language/zh_CN',
-                'Referer': 'https://servicewechat.com/wxc026e7662ec26a3a/42/page-frame.html'
-            }
-            response = requests.request("POST", url, headers=headers, data=payload)
-            wechat_score_list = []
-            word_wechat_score_dict = {
-                "id": i+1,
-                "word": word_list[i],
-                "wechatScores": wechat_score_list,
-            }
-            # if response.json()['code'] == -10000:
-            #     Common.logger(log_type, crawler).info(f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time())))} response:{response.json()['msg']} 休眠 10 秒,重新获取")
-            #     time.sleep(10)
-            #     cls.get_score_test(log_type, crawler)
-            if response.json()['code'] == -10002:
-                Common.logger(log_type, crawler).info("该词暂未收录")
-                # 写飞书
-                if word_list[i] in [x for y in Feishu.get_values_batch(log_type, crawler, "5011a2") for x in y]:
-                    Common.logger(log_type, crawler).info("该词已存在")
+                search_key = wechat_key[0]
+                openid = wechat_key[-1]
+                url = "https://search.weixin.qq.com/cgi-bin/wxaweb/wxindex"
+                payload = json.dumps({
+                    "openid": openid,
+                    "search_key": search_key,
+                    "cgi_name": "GetDefaultIndex",
+                    "start_ymd": start_ymd,
+                    "end_ymd": end_ymd,
+                    "query": word_list[i]
+                })
+                headers = {
+                    'Host': 'search.weixin.qq.com',
+                    'content-type': 'application/json',
+                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.32(0x1800202a) NetType/WIFI Language/zh_CN',
+                    'Referer': 'https://servicewechat.com/wxc026e7662ec26a3a/42/page-frame.html'
+                }
+                response = requests.request("POST", url, headers=headers, data=payload)
+                if response.json()['code'] == -10000:
+                    Common.logger(log_type, crawler).info(f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time())))} response:{response.json()['msg']} 休眠 10 秒,重新获取")
+                    time.sleep(10)
                     continue
-                Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
-                time.sleep(0.5)
-                Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2",
-                                     [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                                       word_list[i],
-                                       "",
-                                       "该词暂未收录"]])
-                Common.logger(log_type, crawler).info("写入飞书成功\n")
-            elif response.json()['code'] != 0:
-                Common.logger(log_type, crawler).warning(f"{word_wechat_score_dict}")
-                continue
-            else:
-                time_index = response.json()['content']['resp_list'][0]['indexes'][0]['time_indexes']
-                for x in range(len(time_index)):
-                    Common.logger(log_type, crawler).info(f"正在更新 {word_list[i]}")
-                    score_time = time_index[x]['time']
-                    score_time_str = f"{str(score_time)[:4]}-{str(score_time)[4:6]}-{str(score_time)[6:]}"
-                    score = time_index[x]['score']
-                    wechat_score_dict = {"score": score, "scoreDate": score_time_str}
-                    wechat_score_list.append(wechat_score_dict)
-                    Common.logger(log_type, crawler).info(f"wechat_score_dict:{wechat_score_dict}")
+
+                wechat_score_list = []
+                word_wechat_score_dict = {
+                    "id": i+1,
+                    "word": word_list[i],
+                    "wechatScores": wechat_score_list,
+                }
+                if response.json()['code'] == -10002:
+                    Common.logger(log_type, crawler).info("该词暂未收录")
                     # 写飞书
-                    # if word_list[i] in [x for y in Feishu.get_values_batch(log_type, crawler, "5011a2") for x in y]:
-                    #     Common.logger(log_type, crawler).info("该词已存在")
-                    #     break
+                    if word_list[i] in [x for y in Feishu.get_values_batch(log_type, crawler, "5011a2") for x in y]:
+                        Common.logger(log_type, crawler).info("该词已存在")
+                        continue
                     Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
                     time.sleep(0.5)
-                    Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2", [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                                                                           word_list[i],
-                                                                           score_time_str,
-                                                                           score]])
+                    Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2",
+                                         [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                                           word_list[i],
+                                           "",
+                                           "该词暂未收录"]])
                     Common.logger(log_type, crawler).info("写入飞书成功\n")
+                elif response.json()['code'] != 0:
+                    Common.logger(log_type, crawler).warning(f"{word_wechat_score_dict}")
+                    continue
+                else:
+                    time_index = response.json()['content']['resp_list'][0]['indexes'][0]['time_indexes']
+                    for x in range(len(time_index)):
+                        Common.logger(log_type, crawler).info(f"正在更新 {word_list[i]}")
+                        score_time = time_index[x]['time']
+                        score_time_str = f"{str(score_time)[:4]}-{str(score_time)[4:6]}-{str(score_time)[6:]}"
+                        score = time_index[x]['score']
+                        wechat_score_dict = {"score": score, "scoreDate": score_time_str}
+                        wechat_score_list.append(wechat_score_dict)
+                        Common.logger(log_type, crawler).info(f"wechat_score_dict:{wechat_score_dict}")
+                        Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
+                        time.sleep(0.5)
+                        Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2", [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                                                                               word_list[i],
+                                                                               score_time_str,
+                                                                               score]])
+                        Common.logger(log_type, crawler).info("写入飞书成功\n")
+                break
 
 
 if __name__ == "__main__":
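
The weixinzhishu_test refactor above moves the request inside the retry loop, so an expired auth key and a -10000 response both sleep and retry, while a completed pass breaks out. A compact sketch of that control-flow shape; fetch_key, fetch_index and process are hypothetical stand-ins:

import time

# Sketch of the retry shape the refactor implements: recoverable failures
# continue the loop after a pause, one successful pass breaks out.
def fetch_with_retry(fetch_key, fetch_index, process, sleep_s=10):
    while True:
        key = fetch_key()
        if key is None:                 # auth expired -> wait and retry
            time.sleep(sleep_s)
            continue
        resp = fetch_index(key)
        if resp["code"] == -10000:      # transient error -> wait and retry
            time.sleep(sleep_s)
            continue
        process(resp)                   # -10002 / non-zero / success handled here
        break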