Browse Source

Merge remote-tracking branch 'origin/master'

zhangyong 1 year ago
Parent commit 4d66017908

+ 331 - 139
gongzhonghao/gongzhonghao_author/gongzhonghao_author.py

@@ -11,6 +11,7 @@ from selenium.webdriver import DesiredCapabilities
 from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.common.by import By
 from selenium import webdriver
+
 sys.path.append(os.getcwd())
 from common.mq import MQ
 from common.common import Common
@@ -37,8 +38,11 @@ class GongzhonghaoAuthor:
             "title": configs[0]["title"].strip(),
             "token": dict(eval(configs[0]["config"]))["token"].strip(),
             "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
-            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
-            "operator": configs[0]["operator"].strip()
+            "update_time": time.strftime(
+                "%Y-%m-%d %H:%M:%S",
+                time.localtime(int(configs[0]["update_time"] / 1000)),
+            ),
+            "operator": configs[0]["operator"].strip(),
         }
         return token_dict
 
@@ -52,7 +56,7 @@ class GongzhonghaoAuthor:
             platform=crawler,
             mode=log_type,
             env=env,
-            message=f"获取站外用户信息:{user_dict['link']}"
+            message=f"获取站外用户信息:{user_dict['link']}",
         )
         while True:
             token_dict = cls.get_token(log_type, crawler, token_index, env)
@@ -62,25 +66,25 @@ class GongzhonghaoAuthor:
                 "accept-encoding": "gzip, deflate, br",
                 "accept-language": "zh-CN,zh;q=0.9",
                 "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
-                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
-                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                 "sec-ch-ua-mobile": "?0",
                 "sec-ch-ua-platform": '"Windows"',
                 "sec-fetch-dest": "empty",
                 "sec-fetch-mode": "cors",
                 "sec-fetch-site": "same-origin",
                 "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
-                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                 "x-requested-with": "XMLHttpRequest",
-                'cookie': token_dict['cookie'],
+                "cookie": token_dict["cookie"],
             }
             params = {
                 "action": "search_biz",
                 "begin": "0",
                 "count": "5",
-                "query": str(user_dict['link']),
-                "token": token_dict['token'],
+                "query": str(user_dict["link"]),
+                "token": token_dict["token"],
                 "lang": "zh_CN",
                 "f": "json",
                 "ajax": "1",
@@ -89,54 +93,98 @@ class GongzhonghaoAuthor:
             r = requests.get(url=url, headers=headers, params=params, verify=False)
             r.close()
             if r.json()["base_resp"]["err_msg"] == "invalid session":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
                 )
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/",
+                    )
                 time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "freq control":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
                 )
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/",
+                    )
                 time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "ok" and len(r.json()["list"]) == 0:
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+                )
+                unbind_msg = task_unbind(
+                    log_type=log_type,
+                    crawler=crawler,
+                    taskid=task_dict["id"],
+                    uids=str(user_dict["uid"]),
+                    env=env,
                 )
-                unbind_msg = task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]), env=env)
                 if unbind_msg == "success":
                     if 20 >= datetime.datetime.now().hour >= 10:
-                        Feishu.bot(log_type, crawler, f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
-                    Common.logging(log_type, crawler, env, f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
+                        Feishu.bot(
+                            log_type,
+                            crawler,
+                            f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
+                        )
+                    Common.logging(
+                        log_type,
+                        crawler,
+                        env,
+                        f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
+                    )
                     AliyunLogger.logging(
                         code="2000",
                         platform=crawler,
                         mode=log_type,
                         env=env,
-                        message=f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n"
+                        message=f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
                     )
                 else:
                     Common.logger(log_type, crawler).warning(f"unbind_msg:{unbind_msg}")
@@ -146,18 +194,22 @@ class GongzhonghaoAuthor:
                         platform=crawler,
                         mode=log_type,
                         env=env,
-                        message=f"unbind_msg: {unbind_msg}"
+                        message=f"unbind_msg: {unbind_msg}",
                     )
                 return None
-            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
-                              'user_id': r.json()["list"][0]["fakeid"],
-                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            user_info_dict = {
+                "user_name": r.json()["list"][0]["nickname"],
+                "user_id": r.json()["list"][0]["fakeid"],
+                "avatar_url": r.json()["list"][0]["round_head_img"],
+            }
             return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
     def get_tencent_video_url(cls, video_id):
-        url = "https://h5vv.video.qq.com/getinfo?vid={}&platform=101001&charge=0&otype=json&defn=shd".format(video_id)
+        url = "https://h5vv.video.qq.com/getinfo?vid={}&platform=101001&charge=0&otype=json&defn=shd".format(
+            video_id
+        )
         headers = {
             "Host": "h5vv.video.qq.com",
             "xweb_xhr": "1",
@@ -168,14 +220,14 @@ class GongzhonghaoAuthor:
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Dest": "empty",
             "Referer": "https://servicewechat.com/wx5fcd817f3f80aece/3/page-frame.html",
-            "Accept-Language": "en"
+            "Accept-Language": "en",
         }
         response = requests.get(url, headers=headers)
         result = json.loads(response.text.replace("QZOutputJson=", "")[:-1])
-        vl = result["vl"]['vi'][0]
-        key = vl['fvkey']
-        name = vl['fn']
-        folder = vl['ul']['ui'][0]['url']
+        vl = result["vl"]["vi"][0]
+        key = vl["fvkey"]
+        name = vl["fn"]
+        folder = vl["ul"]["ui"][0]["url"]
         video_url = folder + name + "?vkey=" + key
         time.sleep(random.randint(1, 5))
         return video_url
@@ -190,27 +242,49 @@ class GongzhonghaoAuthor:
         chrome_options = webdriver.ChromeOptions()
         chrome_options.add_argument("headless")
         chrome_options.add_argument(
-            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            f"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36"
+        )
         chrome_options.add_argument("--no-sandbox")
 
         # driver初始化
         if env == "prod":
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
         else:
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
+            driver = webdriver.Chrome(
+                desired_capabilities=ca,
+                options=chrome_options,
+                service=Service(
+                    "/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver"
+                ),
+            )
 
         driver.implicitly_wait(10)
         driver.get(article_url)
         time.sleep(1)
 
-        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+        if (
+            len(
+                driver.find_elements(
+                    By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]'
+                )
+            )
+            != 0
+        ):
             video_url = driver.find_element(
-                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
-        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
-            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
-                'src')
-            video_id = iframe.split('vid=')[-1].split('&')[0]
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]'
+            ).get_attribute("src")
+        elif (
+            len(
+                driver.find_elements(
+                    By.XPATH, '//span[@class="js_tx_video_container"]/*[1]'
+                )
+            )
+            != 0
+        ):
+            iframe = driver.find_element(
+                By.XPATH, '//span[@class="js_tx_video_container"]/*[1]'
+            ).get_attribute("src")
+            video_id = iframe.split("vid=")[-1].split("&")[0]
             video_url = cls.get_tencent_video_url(video_id)
         else:
             video_url = 0
@@ -221,14 +295,18 @@ class GongzhonghaoAuthor:
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, task_dict, token_index, rule_dict, user_dict, env):
+    def get_videoList(
+        cls, log_type, crawler, task_dict, token_index, rule_dict, user_dict, env
+    ):
         mq = MQ(topic_name="topic_crawler_etl_" + env)
-        user_info_dict = cls.get_user_info(log_type=log_type,
-                                           crawler=crawler,
-                                           task_dict=task_dict,
-                                           user_dict=user_dict,
-                                           token_index=token_index,
-                                           env=env)
+        user_info_dict = cls.get_user_info(
+            log_type=log_type,
+            crawler=crawler,
+            task_dict=task_dict,
+            user_dict=user_dict,
+            token_index=token_index,
+            env=env,
+        )
         if user_info_dict is None:
             return
         user_dict["user_id"] = user_info_dict["user_id"]
@@ -243,27 +321,29 @@ class GongzhonghaoAuthor:
                 "accept-encoding": "gzip, deflate, br",
                 "accept-language": "zh-CN,zh;q=0.9",
                 "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
-                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
-                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                "&type=77&createType=5&token="
+                + str(token_dict["token"])
+                + "&lang=zh_CN",
+                "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                 "sec-ch-ua-mobile": "?0",
                 "sec-ch-ua-platform": '"Windows"',
                 "sec-fetch-dest": "empty",
                 "sec-fetch-mode": "cors",
                 "sec-fetch-site": "same-origin",
                 "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
-                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                 "x-requested-with": "XMLHttpRequest",
-                'cookie': token_dict['cookie'],
+                "cookie": token_dict["cookie"],
             }
             params = {
                 "action": "list_ex",
                 "begin": str(begin),
                 "count": "5",
-                "fakeid": user_dict['user_id'],
+                "fakeid": user_dict["user_id"],
                 "type": "9",
                 "query": "",
-                "token": str(token_dict['token']),
+                "token": str(token_dict["token"]),
                 "lang": "zh_CN",
                 "f": "json",
                 "ajax": "1",
@@ -272,104 +352,164 @@ class GongzhonghaoAuthor:
             r = requests.get(url=url, headers=headers, params=params, verify=False)
             r.close()
             if r.json()["base_resp"]["err_msg"] == "invalid session":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n",
                 )
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/",
+                    )
                 time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "freq control":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n",
                 )
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/",
+                    )
                 time.sleep(60 * 15)
                 continue
-            if r.json()["base_resp"]["err_msg"] == "invalid args" and r.json()["base_resp"]["ret"] == 200002:
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
+            if (
+                r.json()["base_resp"]["err_msg"] == "invalid args"
+                and r.json()["base_resp"]["ret"] == 200002
+            ):
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n",
+                )
+                task_unbind(
+                    log_type=log_type,
+                    crawler=crawler,
+                    taskid=task_dict["id"],
+                    uids=str(user_dict["uid"]),
+                    env=env,
                 )
-                task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]), env=env)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler,f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
+                    )
                 return
-            if 'app_msg_list' not in r.json():
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
-                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
+            if "app_msg_list" not in r.json():
+                Common.logger(log_type, crawler).warning(
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"status_code:{r.status_code}, get_videoList:{r.text}\n",
+                )
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n"
+                    message=f"status_code:{r.status_code}, get_videoList:{r.text}\n",
                 )
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(
+                        log_type,
+                        crawler,
+                        f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/",
+                    )
                 time.sleep(60 * 15)
                 continue
-            if len(r.json()['app_msg_list']) == 0:
-                Common.logger(log_type, crawler).info('没有更多视频了\n')
-                Common.logging(log_type, crawler, env, '没有更多视频了\n')
+            if len(r.json()["app_msg_list"]) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频了\n")
+                Common.logging(log_type, crawler, env, "没有更多视频了\n")
                 AliyunLogger.logging(
                     code="2000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message="没有更多视频了\n"
+                    message="没有更多视频了\n",
                 )
                 return
             else:
                 begin += 5
-                app_msg_list = r.json()['app_msg_list']
+                app_msg_list = r.json()["app_msg_list"]
                 for article in app_msg_list:
                     try:
                         trace_id = crawler + str(uuid.uuid1())
-                        create_time = article.get('create_time', 0)
-                        update_time = article.get('update_time', 0)
+                        create_time = article.get("create_time", 0)
+                        update_time = article.get("update_time", 0)
                         publish_time_stamp = int(create_time)
                         update_time_stamp = int(update_time)
-                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                        article_url = article.get('link', '')
+                        publish_time_str = time.strftime(
+                            "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
+                        )
+                        article_url = article.get("link", "")
                         video_dict = {
-                            'video_id': article.get('aid', ''),
-                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
-                            'publish_time_stamp': publish_time_stamp,
-                            'publish_time_str': publish_time_str,
-                            'user_name': user_dict["user_name"],
-                            'play_cnt': 0,
-                            'comment_cnt': 0,
-                            'like_cnt': 0,
-                            'share_cnt': 0,
-                            'user_id': user_dict['user_id'],
-                            'avatar_url': user_dict['avatar_url'],
-                            'cover_url': article.get('cover', ''),
-                            'article_url': article.get('link', ''),
-                            'video_url': cls.get_video_url(article_url, env),
-                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                            "video_id": article.get("aid", ""),
+                            "video_title": article.get("title", "")
+                            .replace(" ", "")
+                            .replace('"', "")
+                            .replace("'", ""),
+                            "publish_time_stamp": publish_time_stamp,
+                            "publish_time_str": publish_time_str,
+                            "user_name": user_dict["user_name"],
+                            "play_cnt": 0,
+                            "comment_cnt": 0,
+                            "like_cnt": 0,
+                            "share_cnt": 0,
+                            "user_id": user_dict["user_id"],
+                            "avatar_url": user_dict["avatar_url"],
+                            "cover_url": article.get("cover", ""),
+                            "article_url": article.get("link", ""),
+                            "video_url": cls.get_video_url(article_url, env),
+                            "session": f"gongzhonghao-author1-{int(time.time())}",
                         }
                         for k, v in video_dict.items():
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
-                        Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
+                        Common.logging(
+                            log_type, crawler, env, f"video_dict:{video_dict}"
+                        )
                         AliyunLogger.logging(
                             code="1001",
                             trace_id=trace_id,
@@ -377,12 +517,28 @@ class GongzhonghaoAuthor:
                             mode=log_type,
                             env=env,
                             message="扫描到一条视频",
-                            data=video_dict
+                            data=video_dict,
                         )
-                        if (int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)))\
-                                and (int(time.time()) - update_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000))):
-                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
-                            Common.logging(log_type, crawler, env, f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                        if (
+                            int(time.time()) - publish_time_stamp
+                            > 3600
+                            * 24
+                            * int(rule_dict.get("period", {}).get("max", 1000))
+                        ) and (
+                            int(time.time()) - update_time_stamp
+                            > 3600
+                            * 24
+                            * int(rule_dict.get("period", {}).get("max", 1000))
+                        ):
+                            Common.logger(log_type, crawler).info(
+                                f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n"
+                            )
+                            Common.logging(
+                                log_type,
+                                crawler,
+                                env,
+                                f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n",
+                            )
                             AliyunLogger.logging(
                                 code="2004",
                                 trace_id=trace_id,
@@ -396,7 +552,10 @@ class GongzhonghaoAuthor:
                             )
                             return
 
-                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                        if (
+                            video_dict["article_url"] == 0
+                            or video_dict["video_url"] == 0
+                        ):
                             Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
                             Common.logging(log_type, crawler, env, "文章涉嫌违反相关法律法规和政策\n")
                             AliyunLogger.logging(
@@ -406,15 +565,24 @@ class GongzhonghaoAuthor:
                                 mode=log_type,
                                 env=env,
                                 data=video_dict,
-                                message="无效文章或视频"
+                                message="无效文章或视频",
                             )
                         # 标题敏感词过滤
-                        elif any(str(word) if str(word) in video_dict['video_title'] else False
-                                 for word in get_config_from_mysql(log_type=log_type,
-                                                                   source=crawler,
-                                                                   env=env,
-                                                                   text="filter",
-                                                                   action="")) is True:
+                        elif (
+                            any(
+                                str(word)
+                                if str(word) in video_dict["video_title"]
+                                else False
+                                for word in get_config_from_mysql(
+                                    log_type=log_type,
+                                    source=crawler,
+                                    env=env,
+                                    text="filter",
+                                    action="",
+                                )
+                            )
+                            is True
+                        ):
                             Common.logger(log_type, crawler).info("标题已中过滤词\n")
                             Common.logging(log_type, crawler, env, "标题已中过滤词\n")
                             AliyunLogger.logging(
@@ -424,10 +592,19 @@ class GongzhonghaoAuthor:
                                 mode=log_type,
                                 env=env,
                                 data=video_dict,
-                                message="标题已中过滤词\n"
+                                message="标题已中过滤词\n",
                             )
                         # 已下载判断
-                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'], env) != 0:
+                        elif (
+                            cls.repeat_video(
+                                log_type,
+                                crawler,
+                                video_dict["video_id"],
+                                video_dict["video_title"],
+                                env,
+                            )
+                            != 0
+                        ):
                             Common.logger(log_type, crawler).info("视频已下载\n")
                             Common.logging(log_type, crawler, env, "视频已下载\n")
                             AliyunLogger.logging(
@@ -437,7 +614,7 @@ class GongzhonghaoAuthor:
                                 mode=log_type,
                                 env=env,
                                 data=video_dict,
-                                message="视频已下载"
+                                message="视频已下载",
                             )
                         # 标题相似度
                         # elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
@@ -451,7 +628,9 @@ class GongzhonghaoAuthor:
                             video_dict["width"] = 0
                             video_dict["height"] = 0
                             video_dict["crawler_rule"] = json.dumps(rule_dict)
-                            video_dict["user_id"] = user_dict["uid"]  # 站内 UID?爬虫获取不到了(随机发布到原 5 个账号中)
+                            video_dict["user_id"] = user_dict[
+                                "uid"
+                            ]  # 站内 UID?爬虫获取不到了(随机发布到原 5 个账号中)
                             video_dict["publish_time"] = video_dict["publish_time_str"]
                             mq.send_msg(video_dict)
                             AliyunLogger.logging(
@@ -461,7 +640,7 @@ class GongzhonghaoAuthor:
                                 mode=log_type,
                                 env=env,
                                 data=video_dict,
-                                message="成功发送 MQ 至 ETL"
+                                message="成功发送 MQ 至 ETL",
                             )
                             time.sleep(random.randint(1, 8))
                     except Exception as e:
@@ -472,10 +651,10 @@ class GongzhonghaoAuthor:
                             platform=crawler,
                             mode=log_type,
                             env=env,
-                            message=f"抓取单条视频异常:{e}\n"
+                            message=f"抓取单条视频异常:{e}\n",
                         )
-                Common.logger(log_type, crawler).info('休眠 60 秒\n')
-                Common.logging(log_type, crawler, env, '休眠 60 秒\n')
+                Common.logger(log_type, crawler).info("休眠 60 秒\n")
+                Common.logging(log_type, crawler, env, "休眠 60 秒\n")
                 time.sleep(60)
 
     @classmethod
@@ -487,7 +666,11 @@ class GongzhonghaoAuthor:
         return len(repeat_video)
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, task_dict, token_index, rule_dict, user_list, env):
+    def get_all_videos(
+        cls, log_type, crawler, task_dict, token_index, rule_dict, user_list, env
+    ):
+        total_s = 8 * 60 * 60  # 每个爬虫每天抓取的时间是12h(8h等待+4h抓取)
+        wait_average_time = int((total_s / len(user_list)))
         for user_dict in user_list:
             Common.logger(log_type, crawler).info(f'抓取公众号:{user_dict["nick_name"]}\n')
             Common.logging(log_type, crawler, env, f'抓取公众号:{user_dict["nick_name"]}\n')
@@ -496,35 +679,44 @@ class GongzhonghaoAuthor:
                 platform=crawler,
                 mode=log_type,
                 env=env,
-                message="开始抓取公众号: {}".format(user_dict['nick_name'])
+                message="开始抓取公众号: {}".format(user_dict["nick_name"]),
             )
             try:
-                cls.get_videoList(log_type=log_type,
-                                  crawler=crawler,
-                                  task_dict=task_dict,
-                                  token_index = token_index,
-                                  rule_dict=rule_dict,
-                                  user_dict=user_dict,
-                                  env=env)
-                Common.logger(log_type, crawler).info('休眠 60 秒\n')
-                Common.logging(log_type, crawler, env, '休眠 60 秒\n')
-                time.sleep(60)
+                cls.get_videoList(
+                    log_type=log_type,
+                    crawler=crawler,
+                    task_dict=task_dict,
+                    token_index=token_index,
+                    rule_dict=rule_dict,
+                    user_dict=user_dict,
+                    env=env,
+                )
+                sleep_time = random.randint(
+                    wait_average_time - 120, wait_average_time - 60
+                )
+                Common.logger(log_type, crawler).info("休眠 {} 秒\n".format(sleep_time))
+                Common.logging(log_type, crawler, env, "休眠 {} 秒\n".format(sleep_time))
+                time.sleep(sleep_time)
             except Exception as e:
-                Common.logger(log_type, crawler).info(f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n')
-                Common.logging(log_type, crawler, env, f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n')
+                Common.logger(log_type, crawler).info(
+                    f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n'
+                )
+                Common.logging(
+                    log_type, crawler, env, f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n'
+                )
                 AliyunLogger.logging(
                     code="3000",
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message="抓取公众号: {} 时异常".format(user_dict['nick_name'])
+                    message="抓取公众号: {} 时异常".format(user_dict["nick_name"]),
                 )
             AliyunLogger.logging(
                 code="1004",
                 platform=crawler,
                 mode=log_type,
                 env=env,
-                message="完成抓取公众号: {}".format(user_dict['nick_name'])
+                message="完成抓取公众号: {}".format(user_dict["nick_name"]),
             )
 
 
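Aside from the Black-style reformatting that dominates this file, the substantive change is the pacing added to get_all_videos: the fixed 60-second sleep between accounts is replaced by an 8-hour budget spread across user_list. A minimal standalone sketch of that logic, using the names from the diff; the max() clamps are a hypothetical guard, not in the commit, for the case where user_list is large enough to make the original randint range negative:

import random

def paced_sleep_seconds(user_count: int) -> int:
    total_s = 8 * 60 * 60  # 8h wait budget, per the comment in the diff (12h = 8h wait + 4h crawl)
    wait_average_time = int(total_s / user_count)
    low = max(1, wait_average_time - 120)   # hypothetical guard: randint needs low <= high
    high = max(low, wait_average_time - 60)
    return random.randint(low, high)

print(paced_sleep_seconds(5))    # roughly 5640-5700 s between accounts
print(paced_sleep_seconds(500))  # the original range would be negative here

The get_tencent_video_url hunk also shows the JSONP-style unwrapping the h5vv getinfo endpoint needs. A sketch with an invented payload (the field names follow the diff; the values are placeholders):

import json

raw = 'QZOutputJson={"vl":{"vi":[{"fvkey":"KEY","fn":"a.mp4","ul":{"ui":[{"url":"https://example.com/"}]}}]}};'
result = json.loads(raw.replace("QZOutputJson=", "")[:-1])  # drop the wrapper and trailing ";"
vi = result["vl"]["vi"][0]
video_url = vi["ul"]["ui"][0]["url"] + vi["fn"] + "?vkey=" + vi["fvkey"]
print(video_url)  # https://example.com/a.mp4?vkey=KEY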

+ 1 - 11
resend_msg.py

@@ -45,16 +45,6 @@ if __name__ == "__main__":
         video_dict['strategy'] = strategy
         video_dict['platform'] = platform
         video_dict['crawler_rule'] = json.dumps({})
-        print(json.dumps(video_dict, ensure_ascii=False, indent=4))
+        # print(json.dumps(video_dict, ensure_ascii=False, indent=4))
         mq = MQ(topic_name="topic_crawler_etl_" + "prod")
         mq.send_msg(video_dict)
-
-    # video_dict = {'video_title': '吴尊友因病去世!吴老师,您真的不容易,千言万语,汇成一句话您走好❗️', 'video_id': '5262651713', 'duration': 49, 'play_cnt': 71, 'like_cnt': 0, 'comment_cnt': 0, 'share_cnt': 1, 'user_name': '夏日❤️莲莲', 'publish_time_stamp': 1698398572, 'publish_time_str': '2023-10-27 17:22:52', 'video_width': 537, 'video_height': 954, 'avatar_url': 'https://cdn-xphoto2.xiaoniangao.cn/4987933869?Expires=1704038400&OSSAccessKeyId=LTAI4G2W1FsgwzAWYpPoB3v6&Signature=wopOmtlcp9tGyWHYW9uy7DIXO%2Bg%3D&x-oss-process=image%2Fresize%2Cw_200%2Ch_200%2Climit_0%2Finterlace%2C1%2Fquality%2Cq_50%2Fcrop%2Cw_200%2Ch_200%2Cg_center%2Fformat%2Cjpg%2Fauto-orient%2C0', 'profile_id': 55888345, 'profile_mid': 185546, 'cover_url': 'https://cdn-xphoto2.xiaoniangao.cn/5262652619?Expires=1704038400&OSSAccessKeyId=LTAI4G2W1FsgwzAWYpPoB3v6&Signature=qIIRzRICgyv40n3uMFeMwHCY8JY%3D&x-oss-process=image%2Fresize%2Cw_690%2Ch_385%2Climit_0%2Finterlace%2C1%2Fformat%2Cjpg%2Fauto-orient%2C0', 'video_url': 'https://cdn-xalbum2.xiaoniangao.cn/5262651713?Expires=1704038400&OSSAccessKeyId=LTAI5tB7cRkYiqHcTdkVprwb&Signature=hFGFAB49mmgUYwYcF4679bE%2BgLg%3D', 'session': 'xiaoniangao-author-1698402882'}
-    # video_dict['strategy'] = "author"
-    # video_dict['platform'] = "xiaoniangao"
-    # video_dict['user_id'] = 58528269
-    # video_dict['out_video_id'] = video_dict['video_id']
-    # print(json.dumps(video_dict, ensure_ascii=False, indent=4))
-    # mq = MQ(topic_name="topic_crawler_etl_" + "prod")
-    # mq.send_msg(video_dict)
-    #

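With the debug print commented out and the dead example block deleted, the loop body that remains is just tag-and-send. A condensed sketch of that step, assuming video_dict, strategy, and platform come from the rows iterated above (this view truncates them); MQ and send_msg are the same helpers this repo imports elsewhere:

import json

from common.mq import MQ

def resend(video_dict: dict, strategy: str, platform: str) -> None:
    # Tag the record, then push it back onto the prod ETL topic.
    video_dict["strategy"] = strategy
    video_dict["platform"] = platform
    video_dict["crawler_rule"] = json.dumps({})
    MQ(topic_name="topic_crawler_etl_" + "prod").send_msg(video_dict)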
+ 1 - 0
zhuwanwufusu/zhuwanwufusu_recommend/zwwfs_recommend.py

@@ -161,6 +161,7 @@ class ZhuWanWuFuSuRecommend(object):
                         )
 
     def process_video_obj(self, video_obj):
+        time.sleep(random.randint(3, 8))
         trace_id = self.platform + str(uuid.uuid1())
         if video_obj.get("playnum"):
             play_cnt = int(video_obj['playnum'].replace("万+", "0000")) if "万+" in video_obj['playnum'] else int(
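The one-line change adds a 3-8 second jitter before each video object is processed. The context line above also shows how "万+" play counts are normalized; a tiny sketch of that conversion (sample inputs invented):

def parse_play_cnt(playnum: str) -> int:
    # "3万+" -> "30000": replacing 万+ with four zeros reads the count
    # as a lower bound of N * 10,000.
    return int(playnum.replace("万+", "0000")) if "万+" in playnum else int(playnum)

assert parse_play_cnt("3万+") == 30000
assert parse_play_cnt("712") == 712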