
Merge branch 'master' of https://git.yishihui.com/Server/piaoquan_crawler

lierqiang committed 2 years ago
commit 9eba1c9576

+ 2 - 0
README.MD

@@ -221,4 +221,6 @@ ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -
 ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
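The commands above hard-kill each crawler by name. As a hedged aside (not part of the repo), the same cleanup can be scripted from Python; this is a minimal sketch, assuming `psutil` is installed and that matching the process command line on a crawler keyword such as `gongzhonghao` or `xiaoniangao` is an acceptable filter:

```python
# Hypothetical helper mirroring the `ps aux | grep ... | xargs kill -9` commands above.
# Assumes psutil is installed; keywords are matched against each process command line.
import os
import psutil

def kill_crawlers(keywords):
    for proc in psutil.process_iter(["pid", "cmdline"]):
        if proc.info["pid"] == os.getpid():
            continue  # don't kill ourselves (the equivalent of `grep -v grep`)
        cmdline = " ".join(proc.info["cmdline"] or [])
        if any(keyword in cmdline for keyword in keywords):
            print(f"killing pid={proc.info['pid']}: {cmdline}")
            proc.kill()  # SIGKILL, same as kill -9

if __name__ == "__main__":
    kill_crawlers(["gongzhonghao", "xiaoniangao"])
```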

+ 2 - 4
common/public.py

@@ -8,8 +8,6 @@ import random
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.scheduling_db import MysqlHelper
-
-
 # from common import Common
 # from scheduling_db import MysqlHelper
 
@@ -81,7 +79,7 @@ def random_title(log_type, crawler, env, text):
 
 
 def task_fun(task_str):
-    task_str = task_str.replace("'[{", '[{').replace("}}]'", '}}]')
+    task_str = task_str.replace("'[", '[').replace("]'", ']')
     task_dict = dict(eval(task_str))
     rule = task_dict['rule']
     task_dict['rule'] = dict()
@@ -187,7 +185,7 @@ def download_rule(log_type, crawler, video_dict, rule_dict):
 
 if __name__ == "__main__":
     # print(filter_word('public', 'xiaoniangao', '小年糕', 'prod'))
-    print(get_config_from_mysql('hour', 'xiaoniangao', 'dev', 'emoji'))
+    print(get_config_from_mysql('hour', 'xiaoniangao', 'prod', 'emoji'))
     # task_str = "[('task_id','11')," \
     #            "('task_name','小年糕小时榜')," \
     #            "('source','xiaoniangao')," \

BIN
gongzhonghao/.DS_Store


+ 3 - 0
gongzhonghao/gongzhonghao_author/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23

+ 579 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao1_author.py

@@ -0,0 +1,579 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+
+
+class GongzhonghaoAuthor1:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min)\
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
+            return True
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            # Common.logger(log_type, crawler).warning(f"公众号_1未配置token")
+            Feishu.bot(log_type, crawler, "公众号_1:未配置token")
+            time.sleep(60)
+            return None
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"],
+            "token": dict(eval(configs[0]["config"]))["token"],
+            "cookie": dict(eval(configs[0]["config"]))["cookie"],
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"]
+        }
+        # for k, v in token_dict.items():
+        #     print(f"{k}:{v}")
+        return token_dict
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+
+            fakeid = r.json()["list"][0]["fakeid"]
+            head_url = r.json()["list"][0]["round_head_img"]
+            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+            return fakeid_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
+        # try:
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # try:
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+        driver.implicitly_wait(10)
+        # Common.logger(log_type, crawler).info('打开文章链接')
+        driver.get(article_url)
+        time.sleep(1)
+
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+        # try:
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            fakeid_dict = cls.get_fakeid(log_type=log_type,
+                                         crawler=crawler,
+                                         wechat_name=wechat_name,
+                                         env=env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
+                            .replace('"', '').replace("'", "")
+                    # aid
+                    aid = article_url.get('aid', '')
+                    # create_time
+                    create_time = article_url.get('create_time', 0)
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    avatar_url = fakeid_dict['head_url']
+                    # cover_url
+                    cover_url = article_url.get('cover', '')
+                    # article_url
+                    article_url = article_url.get('link', '')
+                    video_url = cls.get_video_url(article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': video_title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-author1-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
+                        cls.begin = 0
+                        return
+
+                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                    # 标题敏感词过滤
+                    elif any(str(word) if str(word) in video_dict['video_title'] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                    # 已下载判断
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info("视频已下载\n")
+                    # 标题相似度
+                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             uid=uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                               title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_size = ffmpeg_dict["size"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+        # 视频size=0,直接删除
+        if int(video_size) == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid=uid,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+        if env == 'prod':
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user in user_list:
+            # try:
+            user_name = user['nick_name']
+            wechat_name = user['link']
+            uid = user['uid']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              wechat_name=wechat_name,
+                              rule_dict=rule_dict,
+                              user_name=user_name,
+                              uid=uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠 60 秒\n')
+            time.sleep(60)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "dev")
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    pass
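Both new author crawlers (gongzhonghao1_author.py and gongzhonghao2_author.py) repeat the same per-field normalization inside `download_rule`: read `min`/`max` from `rule_dict` and treat a `max` of 0 as "no upper bound". As a hedged aside (not part of this commit), the ten near-identical blocks could be collapsed into one small helper; a sketch under that assumption:

```python
# Hypothetical refactor sketch: shared bounds lookup for download_rule.
# A max of 0 is treated as "unbounded", matching the expanded blocks above.
def rule_bounds(rule_dict, key, default_max=100000000):
    rule = rule_dict.get(key, {})
    rule_min = rule.get("min", 0)
    rule_max = rule.get("max", default_max)
    if rule_max == 0:
        rule_max = default_max
    return rule_min, rule_max

# Usage, equivalent to the expanded per-field blocks:
# play_cnt_min, play_cnt_max = rule_bounds(rule_dict, "play_cnt")
# duration_min, duration_max = rule_bounds(rule_dict, "duration")
# publish_time keeps its special fallback of 4102415999000 when max == 0.
```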

+ 579 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao2_author.py

@@ -0,0 +1,579 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+
+
+class GongzhonghaoAuthor2:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
+            return True
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_2%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            # Common.logger(log_type, crawler).warning(f"公众号_2未配置token")
+            Feishu.bot(log_type, crawler, "公众号_2:未配置token")
+            time.sleep(60)
+            return None
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"],
+            "token": dict(eval(configs[0]["config"]))["token"],
+            "cookie": dict(eval(configs[0]["config"]))["cookie"],
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"]
+        }
+        # for k, v in token_dict.items():
+        #     print(f"{k}:{v}")
+        return token_dict
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+
+            fakeid = r.json()["list"][0]["fakeid"]
+            head_url = r.json()["list"][0]["round_head_img"]
+            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+            return fakeid_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
+        # try:
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # try:
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+        driver.implicitly_wait(10)
+        # Common.logger(log_type, crawler).info('打开文章链接')
+        driver.get(article_url)
+        time.sleep(1)
+
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+        # try:
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            fakeid_dict = cls.get_fakeid(log_type=log_type,
+                                         crawler=crawler,
+                                         wechat_name=wechat_name,
+                                         env=env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
+                            .replace('"', '').replace("'", "")
+                    # aid
+                    aid = article_url.get('aid', '')
+                    # create_time
+                    create_time = article_url.get('create_time', 0)
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    avatar_url = fakeid_dict['head_url']
+                    # cover_url
+                    cover_url = article_url.get('cover', '')
+                    # article_url
+                    article_url = article_url.get('link', '')
+                    video_url = cls.get_video_url(article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': video_title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-author2-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
+                        cls.begin = 0
+                        return
+
+                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                    # 标题敏感词过滤
+                    elif any(str(word) if str(word) in video_dict['video_title'] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                    # 已下载判断
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info("视频已下载\n")
+                    # 标题相似度
+                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             uid=uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
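+        # Count existing crawler_video rows for this out_video_id on platform "公众号"; a non-zero result means the video was already crawled.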
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                               title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_size = ffmpeg_dict["size"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+        # 视频size=0,直接删除
+        if int(video_size) == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        # 不满足抓取规则,删除视频文件夹
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid=uid,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+        if env == 'prod':
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user in user_list:
+            # try:
+            user_name = user['nick_name']
+            wechat_name = user['link']
+            uid = user['uid']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              wechat_name=wechat_name,
+                              rule_dict=rule_dict,
+                              user_name=user_name,
+                              uid=uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠 60 秒\n')
+            time.sleep(60)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "dev")
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    pass
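For reference, a minimal driver sketch for this class. This is illustrative only: the real entry points are the run_* scripts, and the exact `user_list` / `rule_dict` shapes and parameter values below are assumptions inferred from how `get_all_videos` and `download_rule` read them.

```python
# Hypothetical invocation; field names are inferred from get_all_videos()/download_rule(),
# and the values (uid, oss_endpoint, rule thresholds) are placeholders, not real config.
from gongzhonghao.gongzhonghao_author.gongzhonghao1_author import GongzhonghaoAuthor1

user_list = [{"nick_name": "示例公众号", "link": "示例公众号", "uid": 123456}]
rule_dict = {"period": {"min": 3, "max": 0}, "duration": {"min": 20, "max": 0}}

GongzhonghaoAuthor1.get_all_videos(log_type="author",
                                   crawler="gongzhonghao",
                                   user_list=user_list,
                                   rule_dict=rule_dict,
                                   oss_endpoint="out",
                                   env="dev")
```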

+ 573 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py

@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+
+
+class GongzhonghaoAuthor3:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
+            return True
+        else:
+            return False
+
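+    # Illustrative shape of the rule_dict assumed by download_rule() above, inferred from its .get() chains
+    # (a max of 0 is treated as "no upper bound"); not a documented schema:
+    # rule_dict = {
+    #     "play_cnt": {"min": 0, "max": 0},
+    #     "duration": {"min": 0, "max": 0},
+    #     "period": {"min": 3, "max": 0},
+    #     "like_cnt": {"min": 0, "max": 0},
+    #     "width": {"min": 0, "max": 0},
+    #     "height": {"min": 0, "max": 0},
+    # }
+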
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_3%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            # Common.logger(log_type, crawler).warning(f"公众号_3未配置token")
+            Feishu.bot(log_type, crawler, "公众号_3:未配置token")
+            time.sleep(60)
+            return None
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"],
+            "token": dict(eval(configs[0]["config"]))["token"],
+            "cookie": dict(eval(configs[0]["config"]))["cookie"],
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"]
+        }
+        for k, v in token_dict.items():
+            print(f"{k}:{v}")
+        return token_dict
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+
+            fakeid = r.json()["list"][0]["fakeid"]
+            head_url = r.json()["list"][0]["round_head_img"]
+            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+            return fakeid_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
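+        # The vv.video.qq.com getinfo endpoint wraps its JSON in "QZOutputJson=...;", so the prefix/suffix is
+        # stripped before json.loads; the playable URL is then assembled from the CDN base url, the vid and the fvkey signature.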
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+        driver.implicitly_wait(10)
+        # Common.logger(log_type, crawler).info('打开文章链接')
+        driver.get(article_url)
+        time.sleep(1)
+
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+        # try:
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            fakeid_dict = cls.get_fakeid(log_type=log_type,
+                                         crawler=crawler,
+                                         wechat_name=wechat_name,
+                                         env=env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
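+                # Pagination: 'begin' is the article offset sent in params; each request returns up to 5 items (count=5), so advance by 5 per page.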
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
+                            .replace('"', '').replace("'", "")
+                    # aid
+                    aid = article_url.get('aid', '')
+                    # create_time
+                    create_time = article_url.get('create_time', 0)
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    avatar_url = fakeid_dict['head_url']
+                    # cover_url
+                    cover_url = article_url.get('cover', '')
+                    # article_url
+                    article_url = article_url.get('link', '')
+                    video_url = cls.get_video_url(article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': video_title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-author3-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
+                        cls.begin = 0
+                        return
+
+                    if not video_dict['article_url'] or video_dict['video_url'] == 0:
+                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                    # 标题敏感词过滤
+                    elif any(str(word) in video_dict['video_title']
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                    # 已下载判断
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info("视频已下载\n")
+                    # 标题相似度
+                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             uid=uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                               title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_size = ffmpeg_dict["size"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+        # 视频size=0,直接删除
+        if int(video_size) == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        # 不满足抓取规则,删除视频文件夹
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid=uid,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+        if env == 'prod':
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user in user_list:
+            # try:
+            user_name = user['nick_name']
+            wechat_name = user['link']
+            uid = user['uid']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              wechat_name=wechat_name,
+                              rule_dict=rule_dict,
+                              user_name=user_name,
+                              uid=uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠 60 秒\n')
+            time.sleep(60)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    GongzhonghaoAuthor3.get_token("author", "gongzhonghao", "dev")
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    pass
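For context, the fields read from each `app_msg_list` entry above (`title`, `aid`, `create_time`, `cover`, `link`) suggest a response item roughly like the sketch below. This is an illustration inferred from the `.get()` calls in `get_videoList`, not a documented schema of the mp.weixin.qq.com appmsg API, and the values are placeholders.

```python
# Assumed shape of one app_msg_list item, for reading get_videoList(); values are placeholders.
app_msg_item = {
    "aid": "2247483801_1",
    "title": "示例视频标题",
    "create_time": 1681711200,                  # unix seconds, used as publish_time_stamp
    "cover": "https://mmbiz.qpic.cn/...",       # cover_url
    "link": "https://mp.weixin.qq.com/s/...",   # article_url, fed to get_video_url()
}
```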

+ 573 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao4_author.py

@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+
+
+class GongzhonghaoAuthor4:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
+            return True
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_4%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            # Common.logger(log_type, crawler).warning(f"公众号_4未配置token")
+            Feishu.bot(log_type, crawler, "公众号_4:未配置token")
+            time.sleep(60)
+            return None
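+        # The crawler_config 'config' column is expected to hold a dict-literal string containing at least
+        # 'token' and 'cookie'; 'update_time' is stored in milliseconds, hence the /1000 below.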
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"],
+            "token": dict(eval(configs[0]["config"]))["token"],
+            "cookie": dict(eval(configs[0]["config"]))["cookie"],
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"]
+        }
+        for k, v in token_dict.items():
+            print(f"{k}:{v}")
+        return token_dict
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+
+            fakeid = r.json()["list"][0]["fakeid"]
+            head_url = r.json()["list"][0]["round_head_img"]
+            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+            return fakeid_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+        driver.implicitly_wait(10)
+        # Common.logger(log_type, crawler).info('打开文章链接')
+        driver.get(article_url)
+        time.sleep(1)
+
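+        # Two extraction paths: a native <video> element under div.js_video_poster (take its src attribute),
+        # or a Tencent Video iframe under span.js_tx_video_container whose 'vid' query parameter is resolved
+        # via get_tencent_video_url(); video_url = 0 is the sentinel for "no playable video found".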
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+        # try:
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            fakeid_dict = cls.get_fakeid(log_type=log_type,
+                                         crawler=crawler,
+                                         wechat_name=wechat_name,
+                                         env=env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
+                            .replace('"', '').replace("'", "")
+                    # aid
+                    aid = article_url.get('aid', '')
+                    # create_time
+                    create_time = article_url.get('create_time', 0)
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    avatar_url = fakeid_dict['head_url']
+                    # cover_url
+                    cover_url = article_url.get('cover', '')
+                    # article_url
+                    article_url = article_url.get('link', '')
+                    video_url = cls.get_video_url(article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': video_title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-author1-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
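+                    # Stop paging once an article is older than the configured period (in days); reset begin so the next account starts from page 0.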
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
+                        cls.begin = 0
+                        return
+
+                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                    # 标题敏感词过滤
+                    elif any(str(word) if str(word) in video_dict['video_title'] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                    # 已下载判断
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info("视频已下载\n")
+                    # 标题相似度
+                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             uid=uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                               title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_size = ffmpeg_dict["size"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+        # 视频size=0,直接删除
+        if int(video_size) == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid=uid,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+        if env == 'prod':
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user in user_list:
+            # try:
+            user_name = user['nick_name']
+            wechat_name = user['link']
+            uid = user['uid']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              wechat_name=wechat_name,
+                              rule_dict=rule_dict,
+                              user_name=user_name,
+                              uid=uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠 60 秒\n')
+            time.sleep(60)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "dev")
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    pass

+ 573 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao5_author.py

@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+
+
+class GongzhonghaoAuthor5:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
+            return True
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
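+        # Dedup by title similarity: returns True if any previously crawled 公众号 title is >=80% similar
+        # (difflib quick_ratio); otherwise the loop falls through and the method implicitly returns None.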
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
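+        # The token/cookie pair is maintained manually in the crawler_config table (row titled 公众号_5);
+        # when it is missing or expired, operators are notified via Feishu to scan the QR code and refresh it.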
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_5%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            # Common.logger(log_type, crawler).warning(f"公众号_3未配置token")
+            Feishu.bot(log_type, crawler, "公众号_5:未配置token")
+            time.sleep(60)
+            return None
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"],
+            "token": dict(eval(configs[0]["config"]))["token"],
+            "cookie": dict(eval(configs[0]["config"]))["cookie"],
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"]
+        }
+        for k, v in token_dict.items():
+            print(f"{k}:{v}")
+        return token_dict
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, wechat_name, env):
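+        # Resolve the configured 公众号 name to its fakeid and avatar via the MP admin searchbiz endpoint,
+        # taking the first search result; retries every 10 minutes when the token is expired or rate-limited.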
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+
+            fakeid = r.json()["list"][0]["fakeid"]
+            head_url = r.json()["list"][0]["round_head_img"]
+            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+            return fakeid_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
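+        # Query the Tencent Video getinfo API with the vid, strip the "QZOutputJson=" JSONP wrapper,
+        # then join the returned CDN prefix, the vid and the fvkey into a direct .mp4 download URL.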
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+        driver.implicitly_wait(10)
+        # Common.logger(log_type, crawler).info('打开文章链接')
+        driver.get(article_url)
+        time.sleep(1)
+
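+        # Two embed types are handled: a native WeChat video element (take its src directly) and a Tencent
+        # Video iframe (extract the vid from the iframe URL and resolve it via get_tencent_video_url).
+        # If neither is present, 0 is returned and the caller skips the article.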
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+        # try:
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            fakeid_dict = cls.get_fakeid(log_type=log_type,
+                                         crawler=crawler,
+                                         wechat_name=wechat_name,
+                                         env=env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
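+                # Advance the class-level paging cursor by the page size (5) before processing this page,
+                # so the next loop iteration requests the following page for the same account.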
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
+                            .replace('"', '').replace("'", "")
+                    # aid
+                    aid = article_url.get('aid', '')
+                    # create_time
+                    create_time = article_url.get('create_time', 0)
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    avatar_url = fakeid_dict['head_url']
+                    # cover_url
+                    cover_url = article_url.get('cover', '')
+                    # article_url
+                    article_url = article_url.get('link', '')
+                    video_url = cls.get_video_url(article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': video_title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-author5-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
+                        cls.begin = 0
+                        return
+
+                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                    # 标题敏感词过滤
+                    elif any(str(word) if str(word) in video_dict['video_title'] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                    # 已下载判断
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info("视频已下载\n")
+                    # 标题相似度
+                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             uid=uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                               title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_size = ffmpeg_dict["size"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+        # 视频size=0,直接删除
+        if int(video_size) == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid=uid,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+        if env == 'prod':
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user in user_list:
+            # try:
+            user_name = user['nick_name']
+            wechat_name = user['link']
+            uid = user['uid']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              wechat_name=wechat_name,
+                              rule_dict=rule_dict,
+                              user_name=user_name,
+                              uid=uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠 60 秒\n')
+            time.sleep(60)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    GongzhonghaoAuthor5.get_token("author", "gongzhonghao", "dev")
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    pass

+ 65 - 0
gongzhonghao/gongzhonghao_follow/get_wechat_name.py

@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import requests
+
+from common.feishu import Feishu
+
+
+class GetName:
+    @classmethod
+    def get_users(cls, token, cookie):
+        user_sheet = Feishu.get_values_batch(log_type="get", crawler="gongzhonghao", sheetid="Bzv72P")
+        for i in range(143, len(user_sheet)):
+        # for i in range(1, 6):
+            user_name = user_sheet[i][0]
+            user_index = user_sheet[i][1]
+            # print(f"user_name:{user_name}")
+            # print(f"user_index:{user_index}")
+            url = 'https://mp.weixin.qq.com/cgi-bin/searchbiz'
+            headers = {
+                'authority': 'mp.weixin.qq.com',
+                'accept': '*/*',
+                'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                'cache-control': 'no-cache',
+                'cookie': cookie,
+                'pragma': 'no-cache',
+                'referer': 'https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=77&createType=0&token=1221914130&lang=zh_CN',
+                'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.58',
+                'x-requested-with': 'XMLHttpRequest',
+            }
+            params = {
+                'action': 'search_biz',
+                'begin': '0',
+                'count': '5',
+                'query': user_name,
+                'token': token,
+                'lang': 'zh_CN',
+                'f': 'json',
+                'ajax': '1',
+            }
+            response = requests.get(url=url, params=params, headers=headers)
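+            # user_index (from the Feishu sheet) is the 1-based position of the target account in the search results.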
+            user_info = response.json()["list"][user_index - 1]
+            wechat_name = user_info['alias']
+            avatar_url = user_info['round_head_img']
+            print(f"user_name:{user_name}")
+            print(f"wechat_name:{wechat_name}")
+            print(f"avatar_url:{avatar_url}")
+            print("\n")
+            Feishu.update_values(log_type="get",
+                                 crawler="gongzhonghao",
+                                 sheetid="Bzv72P",
+                                 ranges=f"C{i+1}:D{i+1}",
+                                 values=[[wechat_name, avatar_url]])
+
+
+if __name__ == "__main__":
+    gzh_token = "1053633489"
+    gzh_cookie = "pgv_pvid=4569186026; pac_uid=0_f9e46a4283b4d; ua_id=OAMvmEDYQG3jR7vtAAAAAPDDtar6-DZte3Voa67Zjr8=; wxuin=69603835570065; mm_lang=zh_CN; tvfe_boss_uuid=7f6d4ba3822d4b08; _clck=3948330815|1|f70|0; uuid=bf85c4c650144e11babcaa44773f9e94; rand_info=CAESIEl8HDdHsNxsJUNCDsHOoz//O780u3eJp9SkZqbeapgW; slave_bizuin=3948330815; data_bizuin=3948330815; bizuin=3948330815; data_ticket=1hMvddRGqB+4IqQVJ1OfzsqMEKTjaYBiPahsRwuM11fmeHY+P7cOUqnlsVoRCr0p; slave_sid=dXFQaHo5V3kyVURkTlpMb3dkMXpWZ21teWhvQ1AwU09VMkI0aHhqR1RXVU52aDYyTGp5NGttZGNLSk5Ubmp1TjJVM0xjZFF1V0l3NTdBNW5nQ3pGUUZLYUF4eEFFNmhMMmNYX1lYSXY1azhBbE5DY25mUkxvNnRCcWpDVktCdkk2cU1UM3Z3NmhCZzVWeGtC; slave_user=gh_c53f57bf4c88; xid=ca068ab9b5f3afd98718b114943b87e5"
+    GetName.get_users(gzh_token, gzh_cookie)

+ 46 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from gongzhonghao.gongzhonghao_author.gongzhonghao1_author import GongzhonghaoAuthor1
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
+    GongzhonghaoAuthor1.get_all_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--oss_endpoint')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
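+    # Example invocation (hypothetical values; the --task string follows the "[('key','value'), ...]" format
+    # expected by task_fun):
+    #   python3 ./gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py --log_type="author" \
+    #       --crawler="gongzhonghao" --task="..." --oss_endpoint="inner" --env="prod"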
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 46 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao2_author_scheduling.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from gongzhonghao.gongzhonghao_author.gongzhonghao2_author import GongzhonghaoAuthor2
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
+    GongzhonghaoAuthor2.get_all_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--oss_endpoint')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 46 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao3_author_scheduling.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from gongzhonghao.gongzhonghao_author.gongzhonghao3_author import GongzhonghaoAuthor3
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
+    GongzhonghaoAuthor3.get_all_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--oss_endpoint')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 46 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao4_author_scheduling.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from gongzhonghao.gongzhonghao_author.gongzhonghao4_author import GongzhonghaoAuthor4
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
+    GongzhonghaoAuthor4.get_all_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--oss_endpoint')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 46 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao5_author_scheduling.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/23
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from gongzhonghao.gongzhonghao_author.gongzhonghao5_author import GongzhonghaoAuthor5
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
+    GongzhonghaoAuthor5.get_all_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument and declare its type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--task')  # add an argument
+    parser.add_argument('--oss_endpoint')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # parse the arguments (values can also be passed from the terminal)
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)
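
The run_gongzhonghaoN_author_scheduling.py runners added here are identical apart from which GongzhonghaoAuthorN class they import and call. A minimal sketch of how one runner could select the class at runtime instead; the `--index` flag and `load_spider` helper are hypothetical and not part of this commit, and the module layout is assumed to match the paths shown in the diff:

```python
# -*- coding: utf-8 -*-
# Hypothetical sketch, not part of this commit: pick the GongzhonghaoAuthorN class
# by an --index argument instead of keeping several near-identical runner scripts.
import argparse
import importlib
import os
import sys
sys.path.append(os.getcwd())


def load_spider(index):
    # assumes the layout shown above:
    # gongzhonghao/gongzhonghao_author/gongzhonghao{N}_author.py -> GongzhonghaoAuthor{N}
    module = importlib.import_module(f"gongzhonghao.gongzhonghao_author.gongzhonghao{index}_author")
    return getattr(module, f"GongzhonghaoAuthor{index}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--index', type=int, default=1)  # which spider variant to run
    parser.add_argument('--log_type', type=str)
    parser.add_argument('--crawler')
    parser.add_argument('--task')
    parser.add_argument('--oss_endpoint')
    parser.add_argument('--env')
    args = parser.parse_args()
    spider_class = load_spider(args.index)
    print(spider_class)  # would then be invoked exactly like GongzhonghaoAuthorN.get_all_videos in main()
```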

+ 2 - 1
main/process.sh

@@ -97,7 +97,7 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 小年糕小时榜爬虫策略 进程状态正常" >> ${log_path}
 fi
 
-# 播放量榜爬虫策略
+# 小年糕播放量榜爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 播放量榜爬虫策略 进程状态" >> ${log_path}
 ps -ef | grep "run_xiaoniangao_play.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
@@ -112,6 +112,7 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 播放量榜爬虫策略 进程状态正常" >> ${log_path}
 fi
 
+
 # 快手定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 快手定向爬虫策略 进程状态" >> ${log_path}
 ps -ef | grep "run_kuaishou_follow.py" | grep -v "grep"
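
The monitoring pattern in process.sh is the same for every crawler: `ps -ef | grep <script> | grep -v grep`, then a restart when the exit status is 1. A rough Python equivalent of that liveness check, shown only for illustration; the pattern string is taken from the hunk above, and this script is not part of the repository:

```python
# Illustrative sketch: the liveness check process.sh performs with
# `ps -ef | grep ... | grep -v grep`, expressed in Python.
import subprocess


def is_running(pattern):
    """Return True if any running command line contains `pattern` (ignoring grep itself)."""
    out = subprocess.run(["ps", "-ef"], capture_output=True, text=True).stdout
    return any(pattern in line and "grep" not in line for line in out.splitlines())


if __name__ == "__main__":
    if is_running("run_xiaoniangao_play.py"):
        print("进程状态正常")
    else:
        print("进程状态异常, 需要重新启动")  # process.sh relaunches the script at this point
```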

+ 1 - 31
scheduling/scheduling_v3/crawler_scheduling_v3.py

@@ -87,7 +87,7 @@ class SchedulingV3:
         Common.logger(log_type, crawler).info(f"已获取调度任务:{type(task)}, {task}")
         mode = task['mode']
         source = task['source']
-        spider_name = f"run_{source}_{mode}_scheduling"
+        spider_name = task['spider_name']
         if env == "aliyun":
             oss_endpoint = "inner"
         elif env == "hk":
@@ -97,36 +97,6 @@ class SchedulingV3:
 
         # 正式环境,调度任务
         Common.logger(log_type, crawler).info(f"开始调度任务")
-        # task_str = [
-        #     ('task_id', str(task['id'])),
-        #     ('task_name', str(task['task_name'])),
-        #     ('source', str(task['source'])),
-        #     ('start_time', str(task['start_time'])),
-        #     ('interval', str(task['interval'])),
-        #     ('mode', str(task['mode'])),
-        #     ('duration_min', eval(task['rule'])['duration']['min']),
-        #     ('duration_max', eval(task['rule'])['duration']['max']),
-        #     ('play_cnt_min', eval(task['rule'])['playCnt']['min']),
-        #     ('play_cnt_max', eval(task['rule'])['playCnt']['max']),
-        #     ('publish_day_min', eval(task['rule'])['period']['min']),
-        #     ('publish_day_max', eval(task['rule'])['period']['max']),
-        #     ('fans_min', eval(task['rule'])['fans']['min']),
-        #     ('fans_max', eval(task['rule'])['fans']['max']),
-        #     ('videos_min', eval(task['rule'])['videos']['min']),
-        #     ('videos_max', eval(task['rule'])['videos']['max']),
-        #     ('video_like_min', eval(task['rule'])['like']['min']),
-        #     ('video_like_max', eval(task['rule'])['like']['max']),
-        #     ('video_width_min', eval(task['rule'])['videoWidth']['min']),
-        #     ('video_width_max', eval(task['rule'])['videoWidth']['max']),
-        #     ('video_height_min', eval(task['rule'])['videoHeight']['min']),
-        #     ('video_height_max', eval(task['rule'])['videoHeight']['max']),
-        #     ('spider_name', str(task['spider_name'])),
-        #     ('machine', str(task['machine'])),
-        #     ('status', str(task['status'])),
-        #     ('create_time', str(task['create_time'])),
-        #     ('update_time', str(task['update_time'])),
-        #     ('operator', str(task['operator']))
-        # ]
         task_str = [
             ('task_id', str(task['id'])),
             ('task_name', str(task['task_name'])),
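
With this change the scheduler no longer derives the script name from `source` and `mode`; it trusts the `spider_name` column of the task row. A hedged sketch of how a launch command could be assembled from such a row; the directory layout follows the new runner files above, but the exact command string the scheduler issues is not shown in this diff, so treat the format, the `--log_type` value, and the omitted `--task` payload as assumptions:

```python
# Hypothetical sketch, not taken from crawler_scheduling_v3.py: build a launch
# command from a task row once spider_name comes straight from the database.
def build_command(task, oss_endpoint, env):
    source = task['source']                      # e.g. "gongzhonghao"
    spider_name = task['spider_name']            # e.g. "run_gongzhonghao4_author_scheduling"
    # --task (the serialized task string) is omitted here for brevity
    return (f"python3 -u {source}/{source}_main/{spider_name}.py "
            f"--log_type=author --crawler={source} "
            f"--oss_endpoint={oss_endpoint} --env={env}")


if __name__ == "__main__":
    task = {"source": "gongzhonghao", "spider_name": "run_gongzhonghao4_author_scheduling"}
    print(build_command(task, "inner", "prod"))
```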

+ 66 - 50
xiaoniangao/xiaoniangao_author/xiaoniangao_author_scheduling.py

@@ -34,10 +34,10 @@ class XiaoniangaoAuthorScheduling:
         :param rule_dict: 规则信息,字典格式
         :return: 满足规则,返回 True;反之,返回 False
         """
-        rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
-        rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
-        if rule_playCnt_max == 0:
-            rule_playCnt_max = 100000000
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
 
         rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
         rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
@@ -48,59 +48,75 @@ class XiaoniangaoAuthorScheduling:
         # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
         # if rule_period_max == 0:
         #     rule_period_max = 100000000
-        #
-        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
-        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
-        # if rule_fans_max == 0:
-        #     rule_fans_max = 100000000
-        #
-        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
-        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
-        # if rule_videos_max == 0:
-        #     rule_videos_max = 100000000
 
-        rule_like_min = rule_dict.get('like', {}).get('min', 0)
-        rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
-        if rule_like_max == 0:
-            rule_like_max = 100000000
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
 
-        rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
-        rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
-        if rule_videoWidth_max == 0:
-            rule_videoWidth_max = 100000000
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
 
-        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
-        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
-        if rule_videoHeight_max == 0:
-            rule_videoHeight_max = 100000000
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
 
-        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
-        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
-        if rule_shareCnt_max == 0:
-            rule_shareCnt_max = 100000000
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
 
-        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
-        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
-        if rule_commentCnt_max == 0:
-            rule_commentCnt_max = 100000000
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
 
-        Common.logger(log_type, crawler).info(f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
-        Common.logger(log_type, crawler).info(f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
-        Common.logger(log_type, crawler).info(f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
-        Common.logger(log_type, crawler).info(f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
-        Common.logger(log_type, crawler).info(f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
-        Common.logger(log_type, crawler).info(f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
 
         if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
                 and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min) \
-                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min) \
-                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min) \
-                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min) \
-                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
             return True
         else:
             return False
@@ -243,8 +259,8 @@ class XiaoniangaoAuthorScheduling:
                     }
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 0)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过3天:{publish_time_str}\n")
+                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
+                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
                         cls.next_t = None
                         return
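
Every rule in the renamed scheme follows the same lookup: read `min` (default 0) and `max` (default huge), and treat a configured `max` of 0 as unbounded (publish_time additionally substitutes a far-future timestamp). A small helper that captures the repeated pattern; the helper is a sketch for readability and does not exist in the repository:

```python
# Sketch of the per-field lookup download_rule repeats above; 0 as the configured
# max is interpreted as "no upper bound".
def get_rule_range(rule_dict, key, unbounded=100000000):
    rule_min = rule_dict.get(key, {}).get('min', 0)
    rule_max = rule_dict.get(key, {}).get('max', unbounded)
    if rule_max == 0:
        rule_max = unbounded
    return rule_min, rule_max


def in_range(value, rule_dict, key):
    rule_min, rule_max = get_rule_range(rule_dict, key)
    return int(rule_max) >= int(value) >= int(rule_min)


if __name__ == "__main__":
    rule = {"play_cnt": {"min": 100, "max": 0}}   # max = 0 means no upper bound
    print(in_range(500, rule, "play_cnt"))        # True
    print(in_range(50, rule, "play_cnt"))         # False
```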
 

+ 28 - 25
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour.py

@@ -24,6 +24,14 @@ proxies = {"http": None, "https": None}
 class XiaoniangaoHour:
     platform = "小年糕"
 
+    words = "abcdefghijklmnopqrstuvwxyz0123456789"
+    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+    token = "".join(random.sample(words, 32))
+    uid_token_dict = {
+        "uid": uid,
+        "token": token
+    }
+
     # 生成 uid、token
     @classmethod
     def get_uid_token(cls):
@@ -94,7 +102,7 @@ class XiaoniangaoHour:
     @classmethod
     def get_videoList(cls, log_type, crawler, env):
         # try:
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
         headers = {
             # "x-b3-traceid": cls.hour_x_b3_traceid,
@@ -353,18 +361,16 @@ class XiaoniangaoHour:
                     "{publish_time_str}",
                     {video_play_cnt},
                     {int(time.time())},
-                    "{time.strftime("%Y-%y-%d %H:%M:%S", time.localtime(int(time.time())))}"
+                    "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
                     )"""
                     Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                     MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                     Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
 
     @classmethod
     def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
         # try:
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
         headers = {
             # "x-b3-traceid": cls.hour_x_b3_traceid,
@@ -462,9 +468,6 @@ class XiaoniangaoHour:
             }
             return video_info_dict
 
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"download_video:{e}\n")
-
     # 更新小时榜数据
     @classmethod
     def update_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
@@ -528,8 +531,6 @@ class XiaoniangaoHour:
                                      env)
             else:
                 pass
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"update_videoList:{e}\n")
 
     @classmethod
     def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
@@ -633,47 +634,49 @@ class XiaoniangaoHour:
         if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
             Common.logger(log_type, crawler).info('视频已下载\n')
-        # 播放量大于 50000,直接下载
+        # 播放量大于 30000,直接下载
-        elif int(video_info_dict["play_cnt"]) >= 50000:
+        elif int(video_info_dict["play_cnt"]) >= 30000:
             Common.logger(log_type, crawler).info(
-                f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
+                f"播放量:{video_info_dict['play_cnt']} >= 30000,满足下载规则,开始下载视频")
             cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
-        # 上升榜判断逻辑,任意时间段上升量>=5000,连续两个时间段上升量>=2000
+        # 上升榜判断逻辑,任意时间段上升量>=3000,任意两个时间段上升量>=1000
-        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(
-                update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
+        elif int(update_video_info['ten_play_cnt']) >= 3000 or int(
+                update_video_info['fifteen_play_cnt']) >= 3000 or int(update_video_info['twenty_play_cnt']) >= 3000:
             Common.logger(log_type, crawler).info(
-                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
+                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 3000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
-        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
+        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['fifteen_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
+                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
-        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+        elif int(update_video_info['fifteen_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
-        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
         else:
             Common.logger(log_type, crawler).info("上升量不满足下载规则")
-    # except Exception as e:
-    #     Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
 
 
 if __name__ == "__main__":
-    print(XiaoniangaoHour.get_expression())
+    # print(XiaoniangaoHour.get_expression())
     # print(XiaoniangaoHour.get_uid_token())
     # XiaoniangaoHour.get_videoList("test", "xiaoniangao", "dev")
     # XiaoniangaoHour.update_videoList("test", "xiaoniangao", "小时榜爬虫策略", "out", "dev")
-
+    # before_yesterday = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
+    # update_time_stamp = int(time.mktime(time.strptime(before_yesterday, "%Y-%m-%d %H:%M:%S")))
+    # print(update_time_stamp)
+    # print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))))
+    print(XiaoniangaoHour.uid_token_dict)
     pass
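
The commit also builds `uid` and `token` once, at class definition time, instead of calling get_uid_token() on every request. The construction draws characters without repetition via random.sample over a 36-character alphabet and formats the uid like a UUID (8-4-4-4-12). A standalone reproduction of that pattern; the uuid4 variant at the end is an alternative added purely for comparison and is not repository code:

```python
# Reproduction of the class-level uid/token construction above.
import random
import uuid

words = "abcdefghijklmnopqrstuvwxyz0123456789"
uid = "-".join("".join(random.sample(words, n)) for n in (8, 4, 4, 4, 12))
token = "".join(random.sample(words, 32))
print({"uid": uid, "token": token})

# Alternative for comparison only: a real UUID and a 32-char hex token.
print({"uid": str(uuid.uuid4()), "token": uuid.uuid4().hex})
```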

+ 79 - 64
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour_scheduling.py

@@ -21,6 +21,13 @@ proxies = {"http": None, "https": None}
 
 class XiaoniangaoHourScheduling:
     platform = "小年糕"
+    words = "abcdefghijklmnopqrstuvwxyz0123456789"
+    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+    token = "".join(random.sample(words, 32))
+    uid_token_dict = {
+        "uid": uid,
+        "token": token
+    }
 
     # 生成 uid、token
     @classmethod
@@ -45,10 +52,10 @@ class XiaoniangaoHourScheduling:
         :param rule_dict: 规则信息,字典格式
         :return: 满足规则,返回 True;反之,返回 False
         """
-        rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
-        rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
-        if rule_playCnt_max == 0:
-            rule_playCnt_max = 100000000
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
 
         rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
         rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
@@ -59,67 +66,75 @@ class XiaoniangaoHourScheduling:
         # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
         # if rule_period_max == 0:
         #     rule_period_max = 100000000
-        #
-        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
-        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
-        # if rule_fans_max == 0:
-        #     rule_fans_max = 100000000
-        #
-        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
-        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
-        # if rule_videos_max == 0:
-        #     rule_videos_max = 100000000
-
-        rule_like_min = rule_dict.get('like', {}).get('min', 0)
-        rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
-        if rule_like_max == 0:
-            rule_like_max = 100000000
-
-        rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
-        rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
-        if rule_videoWidth_max == 0:
-            rule_videoWidth_max = 100000000
-
-        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
-        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
-        if rule_videoHeight_max == 0:
-            rule_videoHeight_max = 100000000
-
-        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
-        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
-        if rule_shareCnt_max == 0:
-            rule_shareCnt_max = 100000000
-
-        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
-        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
-        if rule_commentCnt_max == 0:
-            rule_commentCnt_max = 100000000
+
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
+
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
 
         Common.logger(log_type, crawler).info(
             f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
         Common.logger(log_type, crawler).info(
             f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
 
         if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
                 and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min) \
-                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min) \
-                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min) \
-                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min) \
-                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
             return True
         else:
             return False
@@ -139,7 +154,7 @@ class XiaoniangaoHourScheduling:
     # 获取列表
     @classmethod
     def get_videoList(cls, log_type, crawler, rule_dict, env):
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
         headers = {
             "x-b3-traceid": '1c403a4aa72e3c',
@@ -334,7 +349,7 @@ class XiaoniangaoHourScheduling:
 
     @classmethod
     def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
         headers = {
             "x-b3-traceid": '1c403a4aa72e3c',
@@ -609,9 +624,9 @@ class XiaoniangaoHourScheduling:
         if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
             Common.logger(log_type, crawler).info('视频已下载\n')
-        # 播放量大于 50000,直接下载
+        # 播放量大于 30000,直接下载
-        elif int(video_info_dict["play_cnt"]) >= 50000:
+        elif int(video_info_dict["play_cnt"]) >= 30000:
             Common.logger(log_type, crawler).info(
-                f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
+                f"播放量:{video_info_dict['play_cnt']} >= 30000,满足下载规则,开始下载视频")
             cls.download(log_type=log_type,
                          crawler=crawler,
                          video_info_dict=video_info_dict,
@@ -621,10 +636,10 @@ class XiaoniangaoHourScheduling:
                          env=env)
 
-        # 上升榜判断逻辑,任意时间段上升量>=5000,连续两个时间段上升量>=2000
+        # 上升榜判断逻辑,任意时间段上升量>=3000,任意两个时间段上升量>=1000
-        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(
-                update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
+        elif int(update_video_info['ten_play_cnt']) >= 3000 or int(
+                update_video_info['fifteen_play_cnt']) >= 3000 or int(update_video_info['twenty_play_cnt']) >= 3000:
             Common.logger(log_type, crawler).info(
-                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
+                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 3000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type=log_type,
                          crawler=crawler,
@@ -634,9 +649,9 @@ class XiaoniangaoHourScheduling:
                          oss_endpoint=oss_endpoint,
                          env=env)
 
-        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
+        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['fifteen_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
+                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type=log_type,
                          crawler=crawler,
@@ -646,9 +661,9 @@ class XiaoniangaoHourScheduling:
                          oss_endpoint=oss_endpoint,
                          env=env)
 
-        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+        elif int(update_video_info['fifteen_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type=log_type,
                          crawler=crawler,
@@ -658,9 +673,9 @@ class XiaoniangaoHourScheduling:
                          oss_endpoint=oss_endpoint,
                          env=env)
 
-        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
             Common.logger(log_type, crawler).info(
-                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
             cls.download(log_type=log_type,
                          crawler=crawler,
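
After this change the download decision in both hour files reads: download when the video is not yet in the library and either play_cnt >= 30000, any single slot delta (10:00 / 15:00 / 20:00) reaches 3000, or any two slot deltas both reach 1000. A condensed sketch of that rule, written only to make the branching easier to follow; it is not part of the commit:

```python
# Condensed restatement of the download_publish thresholds above.
def should_download(play_cnt, ten, fifteen, twenty):
    if play_cnt >= 30000:
        return True
    deltas = (ten, fifteen, twenty)
    if any(d >= 3000 for d in deltas):
        return True
    pairs = ((ten, fifteen), (fifteen, twenty), (ten, twenty))
    return any(a >= 1000 and b >= 1000 for a, b in pairs)


if __name__ == "__main__":
    print(should_download(1000, 500, 1200, 1100))   # True: 15:00 and 20:00 deltas both >= 1000
    print(should_download(1000, 500, 800, 900))     # False: no branch satisfied
```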

+ 10 - 16
xiaoniangao/xiaoniangao_play/xiaoniangao_play.py

@@ -23,6 +23,14 @@ proxies = {"http": None, "https": None}
 class XiaoniangaoPlay:
     platform = "小年糕"
 
+    words = "abcdefghijklmnopqrstuvwxyz0123456789"
+    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+    token = "".join(random.sample(words, 32))
+    uid_token_dict = {
+        "uid": uid,
+        "token": token
+    }
+
     # 生成 uid、token
     @classmethod
     def get_uid_token(cls):
@@ -48,7 +56,7 @@ class XiaoniangaoPlay:
             # 宽或高
             if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
                 # 播放量
-                if int(video_dict['play_cnt']) >= 80000:
+                if int(video_dict['play_cnt']) >= 20000:
                     # 点赞量
                     if int(video_dict['like_cnt']) >= 0:
                         # 分享量
@@ -79,21 +87,17 @@ class XiaoniangaoPlay:
     # 获取列表
     @classmethod
     def get_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
         headers = {
-            # "x-b3-traceid": cls.play_x_b3_traceid,
             "x-b3-traceid": '1dc0a6d0929a2b',
-            # "X-Token-Id": cls.play_x_token_id,
             "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
-            # "uid": cls.play_uid,
             "uid": uid_token_dict['uid'],
             "content-type": "application/json",
             "Accept-Encoding": "gzip,compress,br,deflate",
             "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                           ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                           'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
-            # "Referer": cls.play_referer
             "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
         }
         data = {
@@ -141,15 +145,12 @@ class XiaoniangaoPlay:
                 "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
             },
             "refresh": False,
-            # "token": cls.play_token,
             "token": uid_token_dict['token'],
-            # "uid": cls.play_uid,
             "uid": uid_token_dict['uid'],
             "proj": "ma",
             "wx_ver": "8.0.20",
             "code_ver": "3.62.0"
         }
-        # try:
         urllib3.disable_warnings()
         r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
         if "data" not in r.text or r.status_code != 200:
@@ -306,9 +307,6 @@ class XiaoniangaoPlay:
                                      oss_endpoint=oss_endpoint,
                                      env=env)
 
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error("get_play_feeds异常:{}", e)
-
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
@@ -317,7 +315,6 @@ class XiaoniangaoPlay:
 
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, strategy, oss_endpoint, env):
-        # try:
         # 过滤无效视频
         if video_dict["video_id"] == 0 \
                 or video_dict["video_url"] == 0\
@@ -424,9 +421,6 @@ class XiaoniangaoPlay:
             Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
             Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
 
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error('download_publish异常:{}', e)
-
 
 if __name__ == '__main__':
     XiaoniangaoPlay.get_videoList("play", "xiaoniangao", "播放量榜爬虫策略", "out", "dev")

+ 62 - 48
xiaoniangao/xiaoniangao_play/xiaoniangao_play_scheduling.py

@@ -20,6 +20,13 @@ proxies = {"http": None, "https": None}
 
 class XiaoniangaoPlayScheduling:
     platform = "小年糕"
+    words = "abcdefghijklmnopqrstuvwxyz0123456789"
+    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+    token = "".join(random.sample(words, 32))
+    uid_token_dict = {
+        "uid": uid,
+        "token": token
+    }
 
     # 生成 uid、token
     @classmethod
@@ -44,10 +51,10 @@ class XiaoniangaoPlayScheduling:
         :param rule_dict: 规则信息,字典格式
         :return: 满足规则,返回 True;反之,返回 False
         """
-        rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
-        rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
-        if rule_playCnt_max == 0:
-            rule_playCnt_max = 100000000
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
 
         rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
         rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
@@ -58,67 +65,75 @@ class XiaoniangaoPlayScheduling:
         # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
         # if rule_period_max == 0:
         #     rule_period_max = 100000000
-        #
-        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
-        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
-        # if rule_fans_max == 0:
-        #     rule_fans_max = 100000000
-        #
-        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
-        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
-        # if rule_videos_max == 0:
-        #     rule_videos_max = 100000000
 
-        rule_like_min = rule_dict.get('like', {}).get('min', 0)
-        rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
-        if rule_like_max == 0:
-            rule_like_max = 100000000
+        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        if rule_fans_cnt_max == 0:
+            rule_fans_cnt_max = 100000000
 
-        rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
-        rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
-        if rule_videoWidth_max == 0:
-            rule_videoWidth_max = 100000000
+        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        if rule_videos_cnt_max == 0:
+            rule_videos_cnt_max = 100000000
 
-        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
-        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
-        if rule_videoHeight_max == 0:
-            rule_videoHeight_max = 100000000
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
 
-        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
-        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
-        if rule_shareCnt_max == 0:
-            rule_shareCnt_max = 100000000
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
 
-        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
-        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
-        if rule_commentCnt_max == 0:
-            rule_commentCnt_max = 100000000
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 100000000)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
 
         Common.logger(log_type, crawler).info(
             f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
         Common.logger(log_type, crawler).info(
             f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
         Common.logger(log_type, crawler).info(
-            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
 
         if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
                 and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min) \
-                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min) \
-                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min) \
-                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min) \
-                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min)\
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
             return True
         else:
             return False
@@ -126,7 +141,7 @@ class XiaoniangaoPlayScheduling:
     # 获取列表
     @classmethod
     def get_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
-        uid_token_dict = cls.get_uid_token()
+        uid_token_dict = cls.uid_token_dict
         url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
         headers = {
             "x-b3-traceid": '1dc0a6d0929a2b',
@@ -393,6 +408,5 @@ class XiaoniangaoPlayScheduling:
 
 
 if __name__ == '__main__':
-    XiaoniangaoPlayScheduling.get_videoList("play", "xiaoniangao", "播放量榜爬虫策略", "out", "dev")
 
     pass
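
The direct call in the `__main__` block was removed; it passed five positional arguments to a method whose signature takes six (rule_dict was missing). If the scheduling entry point were driven by hand, the invocation would look roughly like this; the concrete rule values are illustrative only and not taken from the repository:

```python
# Illustrative only: driving the scheduling entry point with the renamed rule keys.
if __name__ == '__main__':
    rule_dict = {
        "play_cnt": {"min": 20000, "max": 0},   # max = 0 -> unbounded
        "duration": {"min": 40, "max": 0},
        "period": {"min": 3, "max": 0},
    }
    XiaoniangaoPlayScheduling.get_videoList("play", "xiaoniangao", rule_dict,
                                            "播放量榜爬虫策略", "out", "dev")
```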