wangkun, 1 year ago
Parent commit: e6edc49239

common/feishu.py (+3, -3)

@@ -474,12 +474,12 @@ class Feishu:
                 content = "微信指数"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k"
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "yuzhuoyi")) + "></at> <at id=" + str(
-                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
+                    cls.get_userid(log_type, crawler, "rennian")) + "></at>\n"
             elif crawler == "weixinzhishu":
                 content = "微信指数"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k"
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
-                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
+                    cls.get_userid(log_type, crawler, "rennian")) + "></at>\n"
 
             elif crawler == "xiaoniangao_hour":
                 content = "小年糕_小时级_已下载表"
@@ -609,7 +609,7 @@ class Feishu:
             elif crawler == "gongzhonghao":
                 content = "公众号_信欣_爬虫表"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?"
-                users = f"\n<at id={str(cls.get_userid(log_type, crawler, 'huxinxue'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'wangxueke'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'xinxin'))}></at>\n"
+                users = f"\n<at id={str(cls.get_userid(log_type, crawler, 'huxinxue'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'wangxueke'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'rennian'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'xinxin'))}></at>\n"
 
             elif crawler == "weiqun":
                 content = "微群爬虫表"

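The users strings above are Feishu at-mention markup: each <at id=...></at> tag wraps a user id resolved through cls.get_userid. As a hedged illustration only (the helper below is not part of this commit), the same markup could be assembled from a name list like this:

    @classmethod
    def build_at_mentions(cls, log_type, crawler, names):
        # Resolve each name to a Feishu user id and wrap it in <at></at> markup.
        tags = " ".join(f"<at id={cls.get_userid(log_type, crawler, name)}></at>" for name in names)
        return "\n" + tags + "\n"

    # e.g. users = Feishu.build_at_mentions(log_type, crawler, ["wangkun", "rennian"])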
gongzhonghao/gongzhonghao_author/gongzhonghao4_author.py (+158, -245)

@@ -2,7 +2,7 @@
 # @Author: wangkun
 # @Time: 2023/3/28
 import datetime
-import difflib
+# import difflib
 import json
 import os
 import shutil
@@ -20,123 +20,12 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, title_like, download_rule
 
 
 class GongzhonghaoAuthor4:
     platform = "公众号"
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
-            return True
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
     # 获取 token
     @classmethod
     def get_token(cls, log_type, crawler, env):
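The download_rule and title_like bodies deleted above now come from common.public (see the new import at the top of the file). Their shared implementations are not part of this diff; as a hypothetical sketch for orientation, a title_like mirroring the removed local version, extended with the platform argument used at the new call sites, could look like:

    # Hypothetical sketch of common.public.title_like; the real shared helper is not shown in this commit.
    import difflib
    from common.scheduling_db import MysqlHelper

    def title_like(log_type, crawler, title, platform, env):
        """Return True when an already-crawled title on this platform is >= 80% similar to `title`."""
        select_sql = f""" select * from crawler_video where platform="{platform}" """
        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for video_dict in video_list or []:
            if difflib.SequenceMatcher(None, title, video_dict["video_title"]).quick_ratio() >= 0.8:
                return True
        return False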
@@ -158,9 +47,34 @@ class GongzhonghaoAuthor4:
         #     print(f"{k}:{v}")
         return token_dict
 
+    @classmethod
+    def get_users(cls, log_type, crawler, user_sheet, sheetid, i, env):
+        user_name = user_sheet[i][0]
+        wechat_name = user_sheet[i][2]
+        if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
+            wechat_name = user_name
+        out_uid = user_sheet[i][3]
+        avatar_url = user_sheet[i][4]
+        if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
+            user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+            out_uid = user_info_dict["user_id"]
+            avatar_url = user_info_dict["avatar_url"]
+            Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:E{i + 1}', [[out_uid, avatar_url]])
+
+        our_user_dict = {
+            'user_name': user_name,
+            'user_id': out_uid,
+            'wechat_name': wechat_name,
+            'avatar_url': avatar_url,
+        }
+        for k, v in our_user_dict.items():
+            Common.logger(log_type, crawler).info(f"{k}:{v}")
+        return our_user_dict
+
     # 获取用户 fakeid
     @classmethod
-    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
+        Common.logger(log_type, crawler).info(f"获取站外用户信息:{wechat_name}")
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
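get_users above reads the Feishu user sheet positionally and backfills missing ids. The column layout implied by the indices and the D:E write-back (an inference from this code, not stated elsewhere in the commit):

    # user_sheet[i][0] -> column A: user_name
    # user_sheet[i][2] -> column C: wechat_name (falls back to user_name when blank)
    # user_sheet[i][3] -> column D: out_uid / fakeid
    # user_sheet[i][4] -> column E: avatar_url
    # When D is empty, get_user_info() resolves the id via the searchbiz API and
    # Feishu.update_values() writes [out_uid, avatar_url] back into D{i+1}:E{i+1}.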
@@ -216,11 +130,10 @@ class GongzhonghaoAuthor4:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
                 continue
-
-            fakeid = r.json()["list"][0]["fakeid"]
-            head_url = r.json()["list"][0]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
@@ -273,14 +186,10 @@ class GongzhonghaoAuthor4:
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
         begin = 0
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
-            fakeid_dict = cls.get_fakeid(log_type=log_type,
-                                         crawler=crawler,
-                                         wechat_name=wechat_name,
-                                         env=env)
             url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
             headers = {
                 "accept": "*/*",
@@ -304,7 +213,7 @@ class GongzhonghaoAuthor4:
                 "action": "list_ex",
                 "begin": str(begin),
                 "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
+                "fakeid": user_dict['user_id'],
                 "type": "9",
                 "query": "",
                 "token": str(token_dict['token']),
@@ -342,77 +251,65 @@ class GongzhonghaoAuthor4:
             else:
                 begin += 5
                 app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    # aid
-                    aid = article_url.get('aid', '')
-                    # create_time
-                    create_time = article_url.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    avatar_url = fakeid_dict['head_url']
-                    # cover_url
-                    cover_url = article_url.get('cover', '')
-                    # article_url
-                    article_url = article_url.get('link', '')
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': video_title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_name,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.begin = 0
-                        return
-
-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
-
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 # user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
 
+
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
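In the filtering chain above, the sensitive-word branch builds a generator of str(word)-or-False values; because any non-empty word contained in the title is truthy, the whole expression reduces to a plain substring test. A hedged equivalent, assuming non-empty filter words:

    filter_words = get_config_from_mysql(log_type=log_type, source=crawler, env=env, text="filter", action="")
    if any(str(word) in video_dict["video_title"] for word in filter_words):
        Common.logger(log_type, crawler).info("标题已中过滤词\n")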
@@ -421,34 +318,30 @@ class GongzhonghaoAuthor4:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
         # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                               title=video_dict["video_title"], url=video_dict["video_url"])
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
         video_dict["duration"] = ffmpeg_dict["duration"]
-        video_size = ffmpeg_dict["size"]
         Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
         Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
         Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-        # 视频size=0,直接删除
-        if int(video_size) == 0 or cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
             return
@@ -460,23 +353,33 @@ class GongzhonghaoAuthor4:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
-        strategy = "定向榜爬虫策略"
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  oss_endpoint=oss_endpoint,
-                                                  env=env)
+        strategy = "定向爬虫策略"
         if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
         else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -508,7 +411,7 @@ class GongzhonghaoAuthor4:
                                                     {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
@@ -533,29 +436,39 @@ class GongzhonghaoAuthor4:
         Common.logger(log_type, crawler).info('视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
-        if len(user_list) == 0:
-            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
-            return
-        for user in user_list:
-            # try:
-            user_name = user['nick_name']
-            wechat_name = user['link']
-            uid = user['uid']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              wechat_name=wechat_name,
-                              rule_dict=rule_dict,
-                              user_name=user_name,
-                              uid=uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env)
-            Common.logger(log_type, crawler).info('休眠 60 秒\n')
-            time.sleep(60)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
-
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        while True:
+            sheetid = "Bzv72P"
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            len_sheet = len(user_sheet)
+            if len_sheet <= 301:
+                Common.logger(log_type, crawler).info("抓取用户数<=300,无需启动第四套抓取脚本\n")
+                return
+            if len_sheet >= 401:
+                len_sheet = 401
+            for i in range(301, len_sheet):
+                user_dict = cls.get_users(log_type=log_type,
+                                          crawler=crawler,
+                                          user_sheet=user_sheet,
+                                          sheetid=sheetid,
+                                          i=i,
+                                          env=env)
+                try:
+                    Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                    cls.get_videoList(log_type=log_type,
+                                      crawler=crawler,
+                                      rule_dict=rule_dict,
+                                      user_dict=user_dict,
+                                      env=env)
+                    Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                    time.sleep(60)
+                except Exception as e:
+                    Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
 
 if __name__ == "__main__":
     GongzhonghaoAuthor4.get_token("author", "gongzhonghao", "dev")

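download_publish now defers the threshold check to the shared download_rule imported from common.public, which is not shown in this diff. Judging from the inline version deleted above (per-field [min, max] windows, with max == 0 meaning "no upper bound"), a compact sketch might be:

    # Hypothetical sketch of common.public.download_rule; the real shared helper is not shown in this commit.
    # log_type/crawler are kept to match the call sites; the real helper also logs each comparison.
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """Return True only when every field sits inside its [min, max] window from rule_dict."""
        checks = {
            "duration": int(float(video_dict["duration"])),
            "play_cnt": int(video_dict["play_cnt"]),
            "like_cnt": int(video_dict["like_cnt"]),
            "comment_cnt": int(video_dict["comment_cnt"]),
            "share_cnt": int(video_dict["share_cnt"]),
            "width": int(video_dict["video_width"]),
            "height": int(video_dict["video_height"]),
            "publish_time": int(video_dict["publish_time_stamp"]),
        }
        for key, value in checks.items():
            rule_min = rule_dict.get(key, {}).get("min", 0)
            rule_max = rule_dict.get(key, {}).get("max", 100000000) or 100000000  # 0 means no upper bound
            if not int(rule_min) <= value <= int(rule_max):
                return False
        return True

Note that the period check moved out of this helper: the new get_videoList stops paging once a post is older than rule_dict['period']['max'] days, where the deleted code used the 'min' bound.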
gongzhonghao/gongzhonghao_author/gongzhonghao5_author.py (+161, -256)

@@ -2,7 +2,7 @@
 # @Author: wangkun
 # @Time: 2023/3/28
 import datetime
-import difflib
+# import difflib
 import json
 import os
 import shutil
@@ -20,125 +20,12 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, title_like, download_rule
 
 
 class GongzhonghaoAuthor5:
-    # 翻页参数
-    begin = 0
     platform = "公众号"
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
-            return True
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
     # 获取 token
     @classmethod
     def get_token(cls, log_type, crawler, env):
@@ -161,9 +48,34 @@ class GongzhonghaoAuthor5:
             print(f"{k}:{v}")
         return token_dict
 
+    @classmethod
+    def get_users(cls, log_type, crawler, user_sheet, sheetid, i, env):
+        user_name = user_sheet[i][0]
+        wechat_name = user_sheet[i][2]
+        if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
+            wechat_name = user_name
+        out_uid = user_sheet[i][3]
+        avatar_url = user_sheet[i][4]
+        if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
+            user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+            out_uid = user_info_dict["user_id"]
+            avatar_url = user_info_dict["avatar_url"]
+            Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:E{i + 1}', [[out_uid, avatar_url]])
+
+        our_user_dict = {
+            'user_name': user_name,
+            'user_id': out_uid,
+            'wechat_name': wechat_name,
+            'avatar_url': avatar_url,
+        }
+        for k, v in our_user_dict.items():
+            Common.logger(log_type, crawler).info(f"{k}:{v}")
+        return our_user_dict
+
     # 获取用户 fakeid
     @classmethod
-    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
+        Common.logger(log_type, crawler).info(f"获取站外用户信息:{wechat_name}")
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
@@ -201,7 +113,6 @@ class GongzhonghaoAuthor5:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -209,7 +120,6 @@ class GongzhonghaoAuthor5:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -217,16 +127,14 @@ class GongzhonghaoAuthor5:
             if "list" not in r.json() or len(r.json()["list"]) == 0:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
                 continue
-
-            fakeid = r.json()["list"][0]["fakeid"]
-            head_url = r.json()["list"][0]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
@@ -279,14 +187,10 @@ class GongzhonghaoAuthor5:
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
-        # try:
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        begin = 0
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
-            fakeid_dict = cls.get_fakeid(log_type=log_type,
-                                         crawler=crawler,
-                                         wechat_name=wechat_name,
-                                         env=env)
             url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
             headers = {
                 "accept": "*/*",
@@ -308,9 +212,9 @@ class GongzhonghaoAuthor5:
             }
             params = {
                 "action": "list_ex",
-                "begin": str(cls.begin),
+                "begin": str(begin),
                 "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
+                "fakeid": user_dict['user_id'],
                 "type": "9",
                 "query": "",
                 "token": str(token_dict['token']),
@@ -324,7 +228,6 @@ class GongzhonghaoAuthor5:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -332,7 +235,6 @@ class GongzhonghaoAuthor5:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -340,7 +242,6 @@ class GongzhonghaoAuthor5:
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -349,79 +250,67 @@ class GongzhonghaoAuthor5:
                 Common.logger(log_type, crawler).info('没有更多视频了\n')
                 return
             else:
-                cls.begin += 5
+                begin += 5
                 app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    # aid
-                    aid = article_url.get('aid', '')
-                    # create_time
-                    create_time = article_url.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    avatar_url = fakeid_dict['head_url']
-                    # cover_url
-                    cover_url = article_url.get('cover', '')
-                    # article_url
-                    article_url = article_url.get('link', '')
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': video_title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_name,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.begin = 0
-                        return
-
-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
-
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 # user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
 
+
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
@@ -430,34 +319,30 @@ class GongzhonghaoAuthor5:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
         # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                               title=video_dict["video_title"], url=video_dict["video_url"])
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
         video_dict["duration"] = ffmpeg_dict["duration"]
-        video_size = ffmpeg_dict["size"]
         Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
         Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
         Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-        # 视频size=0,直接删除
-        if int(video_size) == 0 or cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
             return
@@ -469,23 +354,33 @@ class GongzhonghaoAuthor5:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
-        strategy = "定向榜爬虫策略"
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  oss_endpoint=oss_endpoint,
-                                                  env=env)
+        strategy = "定向爬虫策略"
         if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
         else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -517,7 +412,7 @@ class GongzhonghaoAuthor5:
                                                     {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
@@ -542,29 +437,39 @@ class GongzhonghaoAuthor5:
         Common.logger(log_type, crawler).info('视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
-        if len(user_list) == 0:
-            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
-            return
-        for user in user_list:
-            # try:
-            user_name = user['nick_name']
-            wechat_name = user['link']
-            uid = user['uid']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              wechat_name=wechat_name,
-                              rule_dict=rule_dict,
-                              user_name=user_name,
-                              uid=uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env)
-            cls.begin = 0
-            Common.logger(log_type, crawler).info('休眠 60 秒\n')
-            time.sleep(60)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        while True:
+            sheetid = "Bzv72P"
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            len_sheet = len(user_sheet)
+            if len_sheet <= 401:
+                Common.logger(log_type, crawler).info("抓取用户数<=400,无需启动第五套抓取脚本\n")
+                return
+            # if len_sheet >= 501:
+            #     len_sheet = 501
+            for i in range(401, len_sheet):
+                user_dict = cls.get_users(log_type=log_type,
+                                          crawler=crawler,
+                                          user_sheet=user_sheet,
+                                          sheetid=sheetid,
+                                          i=i,
+                                          env=env)
+                try:
+                    Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                    cls.get_videoList(log_type=log_type,
+                                      crawler=crawler,
+                                      rule_dict=rule_dict,
+                                      user_dict=user_dict,
+                                      env=env)
+                    Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                    time.sleep(60)
+                except Exception as e:
+                    Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
 
 
 if __name__ == "__main__":
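Taken together, the two refactored get_all_videos entry points no longer receive a user_list; each pulls its own slice of the Feishu user sheet "Bzv72P" by row index. A hedged summary of the split, using the bounds shown above:

    # Given user_sheet = Feishu.get_values_batch(log_type, crawler, "Bzv72P"):
    len_sheet = len(user_sheet)
    author4_rows = range(301, min(len_sheet, 401))  # gongzhonghao4: runs only when len_sheet > 301
    author5_rows = range(401, len_sheet)            # gongzhonghao5: runs only when len_sheet > 401 (its 501 cap is commented out)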