wangkun 1 year ago
parent commit
1a603d121c
47 changed files with 591 additions and 815 deletions
  1. +1 -0    README.MD
  2. BIN      benshanzhufu/benshanzhufu_recommend/.DS_Store
  3. BIN      benshanzhufu/logs/.DS_Store
  4. +26 -0   common/public.py
  5. BIN      douyin/.DS_Store
  6. BIN      douyin/douyin_recommend/.DS_Store
  7. BIN      ganggangdouchuan/logs/.DS_Store
  8. BIN      ganggangdouchuan/videos/.DS_Store
  9. BIN      gongzhonghao/.DS_Store
  10. +181 -262  gongzhonghao/gongzhonghao_author/gongzhonghao1_author.py
  11. +184 -262  gongzhonghao/gongzhonghao_author/gongzhonghao2_author.py
  12. +187 -252  gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py
  13. BIN      gongzhonghao/gongzhonghao_follow/.DS_Store
  14. +3 -12   gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py
  15. +3 -12   gongzhonghao/gongzhonghao_main/run_gongzhonghao2_author_scheduling.py
  16. +6 -15   gongzhonghao/gongzhonghao_main/run_gongzhonghao3_author_scheduling.py
  17. BIN      gongzhonghao/logs/.DS_Store
  18. BIN      gongzhonghao/videos/.DS_Store
  19. BIN      jixiangxingfu/.DS_Store
  20. BIN      jixiangxingfu/jixiangxingfu_recommend/.DS_Store
  21. BIN      jixiangxingfu/videos/.DS_Store
  22. BIN      kuaishou/kuaishou_follow/.DS_Store
  23. BIN      kuaishou/logs/.DS_Store
  24. BIN      main/.DS_Store
  25. BIN      main/main_logs/.DS_Store
  26. BIN      scheduling/logs/.DS_Store
  27. BIN      shipinhao/shipinhao_search/.DS_Store
  28. BIN      suisuiniannianyingfuqi/logs/.DS_Store
  29. BIN      weixinzhishu/logs/.DS_Store
  30. BIN      weixinzhishu/weixinzhishu_hot_search/.DS_Store
  31. BIN      weixinzhishu/weixinzhishu_main/.DS_Store
  32. BIN      xiaoniangao/.DS_Store
  33. BIN      xiaoniangao/logs/.DS_Store
  34. BIN      xiaoniangao/videos/.DS_Store
  35. BIN      xiaoniangao/xiaoniangao_follow/.DS_Store
  36. BIN      xigua/logs/.DS_Store
  37. BIN      xigua/xigua_follow/.DS_Store
  38. BIN      youtube/.DS_Store
  39. BIN      youtube/logs/.DS_Store
  40. BIN      youtube/videos/.DS_Store
  41. BIN      youtube/youtube_follow/.DS_Store
  42. BIN      zhiqingtiantiankan/.DS_Store
  43. BIN      zhiqingtiantiankan/logs/.DS_Store
  44. BIN      zhiqingtiantiankan/zhiqingtiantiankan_recommend/.DS_Store
  45. BIN      zhongmiaoyinxin/.DS_Store
  46. BIN      zhongmiaoyinxin/logs/.DS_Store
  47. BIN      zhongmiaoyinxin/zhongmiaoyinxin_recommend/.DS_Store

+ 1 - 0
README.MD

@@ -241,4 +241,5 @@ ps aux | grep run_xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_benshanzhufu | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

BIN
benshanzhufu/benshanzhufu_recommend/.DS_Store


BIN
benshanzhufu/logs/.DS_Store


+ 26 - 0
common/public.py

@@ -4,6 +4,7 @@
 import os, sys
 import time
 import random
+import difflib
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.scheduling_db import MysqlHelper
@@ -21,6 +22,31 @@ def get_user_from_mysql(log_type, crawler, source, env, action=''):
         return []
 
 
+def title_like(log_type, crawler, platform, title, env):
+    """
+    标题相似度
+    :param log_type: 日志
+    :param crawler: 哪款爬虫
+    :param platform: 爬虫渠道,如:公众号 / 小年糕
+    :param title: 视频标题
+    :param env: 环境
+    :return: 相似度>=80%,返回 True;反之,返回 False
+    """
+    select_sql = f""" select video_title from crawler_video where platform="{platform}" """
+    video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+    # print(video_list)
+    if len(video_list) == 0:
+        return False
+    for video_dict in video_list:
+        video_title = video_dict["video_title"]
+        # print(video_title)
+        if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+            return True
+        else:
+            continue
+    return False
+
+
 def get_config_from_mysql(log_type, source, env, text, action=''):
     select_sql = f"""select * from crawler_config where source="{source}" """
     contents = MysqlHelper.get_values(log_type, source, select_sql, env, action=action)
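
For reference, a minimal usage sketch of the new `title_like` helper added in this hunk (assuming the repo root is on `sys.path`, as the crawler modules do via `sys.path.append(os.getcwd())`, and that a MySQL config is reachable for the given env):

```python
# Hypothetical usage sketch; keyword arguments follow the new function signature
# def title_like(log_type, crawler, platform, title, env).
from common.public import title_like

# True when the candidate title is >= 80% similar (difflib quick_ratio) to any
# video_title already stored for the platform in crawler_video; False otherwise.
is_duplicate = title_like(log_type="author",
                          crawler="gongzhonghao",
                          platform="公众号",
                          title="示例视频标题",
                          env="dev")
print("标题相似度>=80%" if is_duplicate else "标题未命中相似度过滤")
```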

BIN
douyin/.DS_Store


BIN
douyin/douyin_recommend/.DS_Store


BIN
ganggangdouchuan/logs/.DS_Store


BIN
ganggangdouchuan/videos/.DS_Store


BIN
gongzhonghao/.DS_Store


+ 181 - 262
gongzhonghao/gongzhonghao_author/gongzhonghao1_author.py

@@ -2,7 +2,6 @@
 # @Author: wangkun
 # @Time: 2023/3/28
 import datetime
-import difflib
 import json
 import os
 import shutil
@@ -19,133 +18,20 @@ sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
+from common.getuser import getUser
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, download_rule, title_like
 
 
 class GongzhonghaoAuthor1:
-    # 翻页参数
-    begin = 0
     platform = "公众号"
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min)\
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
-            return True
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
     # 获取 token
     @classmethod
     def get_token(cls, log_type, crawler, env):
         select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
         configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
         if len(configs) == 0:
-            # Common.logger(log_type, crawler).warning(f"公众号_1未配置token")
             Feishu.bot(log_type, crawler, "公众号_1:未配置token")
             time.sleep(60)
             return None
@@ -161,9 +47,70 @@ class GongzhonghaoAuthor1:
         #     print(f"{k}:{v}")
         return token_dict
 
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            user_list = []
+            len_sheet = len(user_sheet)
+            if len_sheet >= 101:
+                len_sheet = 101
+            for i in range(1, len_sheet):
+            # for i in range(1, 3):
+                user_name = user_sheet[i][0]
+                wechat_name = user_sheet[i][2]
+                if wechat_name is None:
+                    wechat_name = user_name
+                our_uid = user_sheet[i][5]
+                our_user_link = user_sheet[i][6]
+                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                out_uid = user_info_dict["user_id"]
+                avatar_url = user_info_dict["avatar_url"]
+                tag1 = user_sheet[i][7]
+                tag2 = user_sheet[i][8]
+                tag3 = user_sheet[i][9]
+                tag4 = user_sheet[i][10]
+                tag5 = user_sheet[i][11]
+                tag6 = user_sheet[i][12]
+                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                if out_uid is None or our_uid is None:
+                    # 用来创建our_id的信息
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': user_info_dict["user_name"],
+                        'avatarUrl': user_info_dict['avatar_url'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, sheetid, f'D{i+1}:G{i+1}', [[user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                else:
+                    Common.logger(log_type, crawler).info("用户信息已存在\n")
+                our_user_dict = {
+                    'user_name': user_name,
+                    'user_id': out_uid,
+                    'wechat_name': wechat_name,
+                    'our_uid': our_uid,
+                    'our_user_link': our_user_link,
+                    'avatar_url': avatar_url,
+                }
+                user_list.append(our_user_dict)
+
+            return user_list
+
     # 获取用户 fakeid
     @classmethod
-    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
@@ -201,7 +148,6 @@ class GongzhonghaoAuthor1:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -209,7 +155,6 @@ class GongzhonghaoAuthor1:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -217,21 +162,18 @@ class GongzhonghaoAuthor1:
             if "list" not in r.json() or len(r.json()["list"]) == 0:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
                 continue
-
-            fakeid = r.json()["list"][0]["fakeid"]
-            head_url = r.json()["list"][0]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
     def get_tencent_video_url(cls, video_id):
-        # try:
         url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
         response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
         response = json.loads(response)
@@ -239,12 +181,9 @@ class GongzhonghaoAuthor1:
         fvkey = response['vl']['vi'][0]['fvkey']
         video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
         return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
 
     @classmethod
     def get_video_url(cls, article_url, env):
-        # try:
         # 打印请求配置
         ca = DesiredCapabilities.CHROME
         ca["goog:loggingPrefs"] = {"performance": "ALL"}
@@ -261,10 +200,9 @@ class GongzhonghaoAuthor1:
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
         else:
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
 
         driver.implicitly_wait(10)
-        # Common.logger(log_type, crawler).info('打开文章链接')
         driver.get(article_url)
         time.sleep(1)
 
@@ -280,19 +218,13 @@ class GongzhonghaoAuthor1:
             video_url = 0
         driver.quit()
         return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
-        # try:
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        begin = 0
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
-            fakeid_dict = cls.get_fakeid(log_type=log_type,
-                                         crawler=crawler,
-                                         wechat_name=wechat_name,
-                                         env=env)
             url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
             headers = {
                 "accept": "*/*",
@@ -314,9 +246,9 @@ class GongzhonghaoAuthor1:
             }
             params = {
                 "action": "list_ex",
-                "begin": str(cls.begin),
+                "begin": str(begin),
                 "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
+                "fakeid": user_dict['user_id'],
                 "type": "9",
                 "query": "",
                 "token": str(token_dict['token']),
@@ -330,7 +262,6 @@ class GongzhonghaoAuthor1:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -338,7 +269,6 @@ class GongzhonghaoAuthor1:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -346,7 +276,6 @@ class GongzhonghaoAuthor1:
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -355,76 +284,63 @@ class GongzhonghaoAuthor1:
                 Common.logger(log_type, crawler).info('没有更多视频了\n')
                 return
             else:
-                cls.begin += 5
+                begin += 5
                 app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    # aid
-                    aid = article_url.get('aid', '')
-                    # create_time
-                    create_time = article_url.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    avatar_url = fakeid_dict['head_url']
-                    # cover_url
-                    cover_url = article_url.get('cover', '')
-                    # article_url
-                    article_url = article_url.get('link', '')
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': video_title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_name,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.begin = 0
-                        return
-
-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
-
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
 
@@ -436,34 +352,30 @@ class GongzhonghaoAuthor1:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
         # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                               title=video_dict["video_title"], url=video_dict["video_url"])
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
         video_dict["duration"] = ffmpeg_dict["duration"]
-        video_size = ffmpeg_dict["size"]
         Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
         Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
         Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-        # 视频size=0,直接删除
-        if int(video_size) == 0 or cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
             return
@@ -476,22 +388,32 @@ class GongzhonghaoAuthor1:
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
         strategy = "定向榜爬虫策略"
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  oss_endpoint=oss_endpoint,
-                                                  env=env)
         if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
         else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -523,7 +445,7 @@ class GongzhonghaoAuthor1:
                                                     {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
@@ -548,32 +470,29 @@ class GongzhonghaoAuthor1:
         Common.logger(log_type, crawler).info('视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
         if len(user_list) == 0:
             Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
             return
-        for user in user_list:
-            # try:
-            user_name = user['nick_name']
-            wechat_name = user['link']
-            uid = user['uid']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              wechat_name=wechat_name,
-                              rule_dict=rule_dict,
-                              user_name=user_name,
-                              uid=uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env)
-            cls.begin = 0
-            Common.logger(log_type, crawler).info('休眠 60 秒\n')
-            time.sleep(60)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+        for user_dict in user_list:
+            try:
+                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  rule_dict=rule_dict,
+                                  user_dict=user_dict,
+                                  env=env)
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
 
 
 if __name__ == "__main__":
-    GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "dev")
+    # GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "dev")
+    print(GongzhonghaoAuthor1.get_users("author", "gongzhonghao", "Bzv72P", "dev"))
     # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    # print(title_like("author", "gongzhonghao", "公众号", "123", "dev"))
+
     pass
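
The per-class `download_rule` removed above is replaced by `download_rule` imported from `common.public`; its body is not part of this diff. A minimal sketch consistent with the deleted logic might look like the following (hypothetical; the real shared helper may also log each bound and apply the `period` / `publish_time` windows the deleted code checked):

```python
# Hypothetical reconstruction of a shared download_rule based on the per-class
# version deleted in this commit; not the actual common/public.py implementation.
def download_rule(log_type, crawler, video_dict, rule_dict):
    """满足抓取规则返回 True,否则返回 False(最小示意版)。"""
    # log_type / crawler are only used for logging in the deleted version; omitted here.
    def bounds(key):
        rule = rule_dict.get(key, {})
        min_v = int(rule.get('min', 0))
        max_v = int(rule.get('max', 100000000))
        return min_v, (100000000 if max_v == 0 else max_v)

    checks = {
        'duration': int(float(video_dict['duration'])),
        'play_cnt': int(video_dict['play_cnt']),
        'like_cnt': int(video_dict['like_cnt']),
        'comment_cnt': int(video_dict['comment_cnt']),
        'share_cnt': int(video_dict['share_cnt']),
        'width': int(video_dict['video_width']),
        'height': int(video_dict['video_height']),
    }
    return all(lo <= value <= hi
               for key, value in checks.items()
               for lo, hi in [bounds(key)])
```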

+ 184 - 262
gongzhonghao/gongzhonghao_author/gongzhonghao2_author.py

@@ -2,7 +2,6 @@
 # @Author: wangkun
 # @Time: 2023/3/28
 import datetime
-import difflib
 import json
 import os
 import shutil
@@ -16,136 +15,23 @@ from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.common.by import By
 from selenium import webdriver
 sys.path.append(os.getcwd())
+from common.getuser import getUser
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, title_like, download_rule
 
 
 class GongzhonghaoAuthor2:
-    # 翻页参数
-    begin = 0
     platform = "公众号"
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
-            return True
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
     # 获取 token
     @classmethod
     def get_token(cls, log_type, crawler, env):
         select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_2%";"""
         configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
         if len(configs) == 0:
-            # Common.logger(log_type, crawler).warning(f"公众号_2未配置token")
             Feishu.bot(log_type, crawler, "公众号_2:未配置token")
             time.sleep(60)
             return None
@@ -157,13 +43,78 @@ class GongzhonghaoAuthor2:
             "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
             "operator": configs[0]["operator"]
         }
-        for k, v in token_dict.items():
-            print(f"{k}:{v}")
+        # for k, v in token_dict.items():
+        #     print(f"{k}:{v}")
         return token_dict
 
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            user_list = []
+            len_sheet = len(user_sheet)
+            if len_sheet <= 101:
+                Common.logger(log_type, crawler).info("抓取用户数<=100,无需启动第二套抓取脚本\n")
+                return
+            if len_sheet >= 201:
+                len_sheet = 201
+            for i in range(101, len_sheet):
+            # for i in range(1, 3):
+                user_name = user_sheet[i][0]
+                wechat_name = user_sheet[i][2]
+                if wechat_name is None:
+                    wechat_name = user_name
+                our_uid = user_sheet[i][5]
+                our_user_link = user_sheet[i][6]
+                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                out_uid = user_info_dict["user_id"]
+                avatar_url = user_info_dict["avatar_url"]
+                tag1 = user_sheet[i][7]
+                tag2 = user_sheet[i][8]
+                tag3 = user_sheet[i][9]
+                tag4 = user_sheet[i][10]
+                tag5 = user_sheet[i][11]
+                tag6 = user_sheet[i][12]
+                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                if out_uid is None or our_uid is None:
+                    # 用来创建our_id的信息
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': user_info_dict["user_name"],
+                        'avatarUrl': user_info_dict['avatar_url'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
+                        [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                else:
+                    Common.logger(log_type, crawler).info("用户信息已存在\n")
+                our_user_dict = {
+                    'user_name': user_name,
+                    'user_id': out_uid,
+                    'wechat_name': wechat_name,
+                    'our_uid': our_uid,
+                    'our_user_link': our_user_link,
+                    'avatar_url': avatar_url,
+                }
+                user_list.append(our_user_dict)
+
+            return user_list
+
     # 获取用户 fakeid
     @classmethod
-    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
@@ -201,7 +152,6 @@ class GongzhonghaoAuthor2:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -209,7 +159,6 @@ class GongzhonghaoAuthor2:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -217,21 +166,19 @@ class GongzhonghaoAuthor2:
             if "list" not in r.json() or len(r.json()["list"]) == 0:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
                 continue
 
-            fakeid = r.json()["list"][0]["fakeid"]
-            head_url = r.json()["list"][0]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
     def get_tencent_video_url(cls, video_id):
-        # try:
         url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
         response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
         response = json.loads(response)
@@ -239,12 +186,9 @@ class GongzhonghaoAuthor2:
         fvkey = response['vl']['vi'][0]['fvkey']
         video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
         return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
 
     @classmethod
     def get_video_url(cls, article_url, env):
-        # try:
         # 打印请求配置
         ca = DesiredCapabilities.CHROME
         ca["goog:loggingPrefs"] = {"performance": "ALL"}
@@ -260,11 +204,9 @@ class GongzhonghaoAuthor2:
         if env == "prod":
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
         else:
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
 
         driver.implicitly_wait(10)
-        # Common.logger(log_type, crawler).info('打开文章链接')
         driver.get(article_url)
         time.sleep(1)
 
@@ -280,19 +222,13 @@ class GongzhonghaoAuthor2:
             video_url = 0
         driver.quit()
         return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
-        # try:
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        begin = 0
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
-            fakeid_dict = cls.get_fakeid(log_type=log_type,
-                                         crawler=crawler,
-                                         wechat_name=wechat_name,
-                                         env=env)
             url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
             headers = {
                 "accept": "*/*",
@@ -314,9 +250,9 @@ class GongzhonghaoAuthor2:
             }
             params = {
                 "action": "list_ex",
-                "begin": str(cls.begin),
+                "begin": str(begin),
                 "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
+                "fakeid": user_dict['user_id'],
                 "type": "9",
                 "query": "",
                 "token": str(token_dict['token']),
@@ -330,7 +266,6 @@ class GongzhonghaoAuthor2:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -338,7 +273,6 @@ class GongzhonghaoAuthor2:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -346,7 +280,6 @@ class GongzhonghaoAuthor2:
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -355,75 +288,64 @@ class GongzhonghaoAuthor2:
                 Common.logger(log_type, crawler).info('没有更多视频了\n')
                 return
             else:
-                cls.begin += 5
+                begin += 5
                 app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    # aid
-                    aid = article_url.get('aid', '')
-                    # create_time
-                    create_time = article_url.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    avatar_url = fakeid_dict['head_url']
-                    # cover_url
-                    cover_url = article_url.get('cover', '')
-                    # article_url
-                    article_url = article_url.get('link', '')
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': video_title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_name,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.begin = 0
-                        return
-
-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author2-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(
+                                f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, cls.platform, video_dict['video_title'], env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
 
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
@@ -436,34 +358,30 @@ class GongzhonghaoAuthor2:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
         # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                               title=video_dict["video_title"], url=video_dict["video_url"])
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
         video_dict["duration"] = ffmpeg_dict["duration"]
-        video_size = ffmpeg_dict["size"]
         Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
         Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
         Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-        # 视频size=0,直接删除
-        if int(video_size) == 0 or cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
             return
@@ -476,22 +394,32 @@ class GongzhonghaoAuthor2:
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
         strategy = "定向榜爬虫策略"
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  oss_endpoint=oss_endpoint,
-                                                  env=env)
         if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
         else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -548,29 +476,23 @@ class GongzhonghaoAuthor2:
         Common.logger(log_type, crawler).info('视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
-        if len(user_list) == 0:
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
+        if user_list is None or len(user_list) == 0:
             Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
             return
-        for user in user_list:
-            # try:
-            user_name = user['nick_name']
-            wechat_name = user['link']
-            uid = user['uid']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              wechat_name=wechat_name,
-                              rule_dict=rule_dict,
-                              user_name=user_name,
-                              uid=uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env)
-            cls.begin = 0
-            Common.logger(log_type, crawler).info('休眠 60 秒\n')
-            time.sleep(60)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+        for user_dict in user_list:
+            try:
+                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  rule_dict=rule_dict,
+                                  user_dict=user_dict,
+                                  env=env)
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
 
 
 if __name__ == "__main__":

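For orientation on the refactored entry point above, here is a minimal usage sketch. It assumes a rule_dict that only carries the `period` key this file actually reads (download_rule from common/public.py may consult further keys not shown in this commit), and the log_type/crawler values are illustrative:

```
# Hedged usage sketch; rule_dict contents and the log_type/crawler values are assumptions.
from gongzhonghao.gongzhonghao_author.gongzhonghao2_author import GongzhonghaoAuthor2

rule_dict = {"period": {"min": 0, "max": 3}}  # only keep articles published within the last 3 days
GongzhonghaoAuthor2.get_all_videos(log_type="author",
                                   crawler="gongzhonghao",
                                   rule_dict=rule_dict,
                                   env="dev")
```

Running it for real requires the Feishu user sheet, token config and database that the module reads internally; the sketch only shows the call shape after the user_list/oss_endpoint parameters were dropped.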
+ 187 - 252
gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py

@@ -2,7 +2,6 @@
 # @Author: wangkun
 # @Time: 2023/3/28
 import datetime
-import difflib
 import json
 import os
 import shutil
@@ -17,136 +16,23 @@ from selenium.webdriver.common.by import By
 from selenium import webdriver
 sys.path.append(os.getcwd())
 from common.common import Common
+from common.getuser import getUser
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, title_like, download_rule
 
 
 class GongzhonghaoAuthor3:
-    # 翻页参数
-    begin = 0
     platform = "公众号"
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min):
-            return True
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
     # 获取 token
     @classmethod
     def get_token(cls, log_type, crawler, env):
         select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_3%";"""
         configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
         if len(configs) == 0:
-            # Common.logger(log_type, crawler).warning(f"公众号_3未配置token")
-            Feishu.bot(log_type, crawler, "公众号_3:未配置token")
+            Feishu.bot(log_type, crawler, "公众号_3:未配置token\n请登录后配置\nhttps://admin.piaoquantv.com/cms/spider-source-config")
             time.sleep(60)
             return None
         token_dict = {
@@ -157,13 +43,78 @@ class GongzhonghaoAuthor3:
             "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
             "operator": configs[0]["operator"]
         }
-        for k, v in token_dict.items():
-            print(f"{k}:{v}")
+        # for k, v in token_dict.items():
+        #     print(f"{k}:{v}")
         return token_dict
 
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            user_list = []
+            len_sheet = len(user_sheet)
+            if len_sheet <= 201:
+                Common.logger(log_type, crawler).info("抓取用户数<=200,无需启动第三套抓取脚本\n")
+                return
+            if len_sheet >= 301:
+                len_sheet = 301
+            for i in range(201, len_sheet):
+            # for i in range(1, 3):
+                user_name = user_sheet[i][0]
+                wechat_name = user_sheet[i][2]
+                if wechat_name is None:
+                    wechat_name = user_name
+                our_uid = user_sheet[i][5]
+                our_user_link = user_sheet[i][6]
+                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                out_uid = user_info_dict["user_id"]
+                avatar_url = user_info_dict["avatar_url"]
+                tag1 = user_sheet[i][7]
+                tag2 = user_sheet[i][8]
+                tag3 = user_sheet[i][9]
+                tag4 = user_sheet[i][10]
+                tag5 = user_sheet[i][11]
+                tag6 = user_sheet[i][12]
+                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                if out_uid is None or our_uid is None:
+                    # 用来创建our_id的信息
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': user_info_dict["user_name"],
+                        'avatarUrl': user_info_dict['avatar_url'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
+                        [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                else:
+                    Common.logger(log_type, crawler).info("用户信息已存在\n")
+                our_user_dict = {
+                    'user_name': user_name,
+                    'user_id': out_uid,
+                    'wechat_name': wechat_name,
+                    'our_uid': our_uid,
+                    'our_user_link': our_user_link,
+                    'avatar_url': avatar_url,
+                }
+                user_list.append(our_user_dict)
+
+            return user_list
+
     # 获取用户 fakeid
     @classmethod
-    def get_fakeid(cls, log_type, crawler, wechat_name, env):
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
@@ -223,10 +174,10 @@ class GongzhonghaoAuthor3:
                 time.sleep(60 * 10)
                 continue
 
-            fakeid = r.json()["list"][0]["fakeid"]
-            head_url = r.json()["list"][0]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
 
     # 获取腾讯视频下载链接
     @classmethod
@@ -257,7 +208,7 @@ class GongzhonghaoAuthor3:
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
         else:
             driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
 
         driver.implicitly_wait(10)
         # Common.logger(log_type, crawler).info('打开文章链接')
@@ -279,14 +230,10 @@ class GongzhonghaoAuthor3:
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, wechat_name, rule_dict, user_name, uid, oss_endpoint, env):
-        # try:
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        begin = 0
         while True:
             token_dict = cls.get_token(log_type, crawler, env)
-            fakeid_dict = cls.get_fakeid(log_type=log_type,
-                                         crawler=crawler,
-                                         wechat_name=wechat_name,
-                                         env=env)
             url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
             headers = {
                 "accept": "*/*",
@@ -308,9 +255,9 @@ class GongzhonghaoAuthor3:
             }
             params = {
                 "action": "list_ex",
-                "begin": str(cls.begin),
+                "begin": str(begin),
                 "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
+                "fakeid": user_dict['user_id'],
                 "type": "9",
                 "query": "",
                 "token": str(token_dict['token']),
@@ -324,7 +271,6 @@ class GongzhonghaoAuthor3:
             if r.json()["base_resp"]["err_msg"] == "invalid session":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 过期啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -332,7 +278,6 @@ class GongzhonghaoAuthor3:
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}, 操作人:{token_dict['operator']}, 更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -340,7 +285,6 @@ class GongzhonghaoAuthor3:
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                # Common.logger(log_type, crawler).warning(f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} 频控啦\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 10)
@@ -349,75 +293,65 @@ class GongzhonghaoAuthor3:
                 Common.logger(log_type, crawler).info('没有更多视频了\n')
                 return
             else:
-                cls.begin += 5
+                begin += 5
                 app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    video_title = article_url.get("title", "").replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    # aid
-                    aid = article_url.get('aid', '')
-                    # create_time
-                    create_time = article_url.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    avatar_url = fakeid_dict['head_url']
-                    # cover_url
-                    cover_url = article_url.get('cover', '')
-                    # article_url
-                    article_url = article_url.get('link', '')
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': video_title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_name,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.begin = 0
-                        return
-
-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author3-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(
+                                rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(
+                                f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, cls.platform, video_dict['video_title'], env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
 
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
@@ -430,34 +364,30 @@ class GongzhonghaoAuthor3:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
         # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                               title=video_dict["video_title"], url=video_dict["video_url"])
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                    f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
         video_dict["duration"] = ffmpeg_dict["duration"]
-        video_size = ffmpeg_dict["size"]
         Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
         Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
         Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-        # 视频size=0,直接删除
-        if int(video_size) == 0 or cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
             return
@@ -470,22 +400,32 @@ class GongzhonghaoAuthor3:
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
         strategy = "定向榜爬虫策略"
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  oss_endpoint=oss_endpoint,
-                                                  env=env)
         if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
         else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -542,29 +482,24 @@ class GongzhonghaoAuthor3:
         Common.logger(log_type, crawler).info('视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, user_list, rule_dict, oss_endpoint, env):
-        if len(user_list) == 0:
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
+        # Common.logger(log_type, crawler).info(f"user_list:{user_list}")
+        if user_list is None or len(user_list) == 0:
             Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
             return
-        for user in user_list:
-            # try:
-            user_name = user['nick_name']
-            wechat_name = user['link']
-            uid = user['uid']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              wechat_name=wechat_name,
-                              rule_dict=rule_dict,
-                              user_name=user_name,
-                              uid=uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env)
-            cls.begin = 0
-            Common.logger(log_type, crawler).info('休眠 60 秒\n')
-            time.sleep(60)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+        for user_dict in user_list:
+            try:
+                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  rule_dict=rule_dict,
+                                  user_dict=user_dict,
+                                  env=env)
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
 
 
 if __name__ == "__main__":

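The row slicing in get_users above is easy to misread, so here is a small self-contained sketch of the same index arithmetic with hypothetical sheet data; it assumes, as the D{i+1}:G{i+1} writes suggest, that list index 0 is the sheet's header row:

```
# Standalone illustration of the range(201, min(len_sheet, 301)) slice used above.
# The sheet contents are hypothetical; only the index arithmetic matters.
user_sheet = [["header"]] + [[f"user_{n}"] for n in range(1, 400)]
len_sheet = len(user_sheet)
if len_sheet <= 201:
    print("抓取用户数<=200,无需启动第三套抓取脚本")
else:
    len_sheet = min(len_sheet, 301)
    rows = [i + 1 for i in range(201, len_sheet)]  # the Feishu rows written via D{i+1}:G{i+1}
    print(rows[0], rows[-1])  # 202 301: this third script covers sheet rows 202-301 at most
```

In other words, the three author scripts appear to split one shared user sheet into slices of at most 100 rows each; only the slice for this third script is visible in this commit.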
BIN
gongzhonghao/gongzhonghao_follow/.DS_Store


+ 3 - 12
gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py

@@ -7,25 +7,18 @@ import sys
 sys.path.append(os.getcwd())
 from common.public import task_fun
 from common.common import Common
-from common.scheduling_db import MysqlHelper
 from gongzhonghao.gongzhonghao_author.gongzhonghao1_author import GongzhonghaoAuthor1
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
     Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
     GongzhonghaoAuthor1.get_all_videos(log_type=log_type,
                                         crawler=crawler,
-                                        user_list=user_list,
                                         rule_dict=rule_dict,
-                                        oss_endpoint=oss_endpoint,
                                         env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取完一轮\n')
@@ -36,11 +29,9 @@ if __name__ == "__main__":
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
     parser.add_argument('--crawler')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
     parser.add_argument('--env')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
          env=args.env)

+ 3 - 12
gongzhonghao/gongzhonghao_main/run_gongzhonghao2_author_scheduling.py

@@ -7,25 +7,18 @@ import sys
 sys.path.append(os.getcwd())
 from common.public import task_fun
 from common.common import Common
-from common.scheduling_db import MysqlHelper
 from gongzhonghao.gongzhonghao_author.gongzhonghao2_author import GongzhonghaoAuthor2
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
     Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
     GongzhonghaoAuthor2.get_all_videos(log_type=log_type,
                                         crawler=crawler,
-                                        user_list=user_list,
                                         rule_dict=rule_dict,
-                                        oss_endpoint=oss_endpoint,
                                         env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取完一轮\n')
@@ -36,11 +29,9 @@ if __name__ == "__main__":
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
     parser.add_argument('--crawler')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
     parser.add_argument('--env')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
          env=args.env)

+ 6 - 15
gongzhonghao/gongzhonghao_main/run_gongzhonghao3_author_scheduling.py

@@ -7,26 +7,19 @@ import sys
 sys.path.append(os.getcwd())
 from common.public import task_fun
 from common.common import Common
-from common.scheduling_db import MysqlHelper
 from gongzhonghao.gongzhonghao_author.gongzhonghao3_author import GongzhonghaoAuthor3
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
     Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
     GongzhonghaoAuthor3.get_all_videos(log_type=log_type,
-                                        crawler=crawler,
-                                        user_list=user_list,
-                                        rule_dict=rule_dict,
-                                        oss_endpoint=oss_endpoint,
-                                        env=env)
+                                       crawler=crawler,
+                                       rule_dict=rule_dict,
+                                       env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取完一轮\n')
 
@@ -36,11 +29,9 @@ if __name__ == "__main__":
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
     parser.add_argument('--crawler')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
     parser.add_argument('--env')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
          env=args.env)

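The three scheduler diffs above all drop the --oss_endpoint flag (the endpoint is now chosen inside download_publish from env). As a quick check of the surviving CLI surface, here is a minimal mirror of the updated parser; the argument values are illustrative placeholders, not taken from this commit:

```
# Minimal mirror of the parser left in run_gongzhonghaoN_author_scheduling.py after this change.
# The values passed below are illustrative assumptions; '<task json>' stands in for the real payload
# expected by common.public.task_fun, which is not shown here.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--log_type', type=str)
parser.add_argument('--crawler')
parser.add_argument('--task')
parser.add_argument('--env')
args = parser.parse_args(['--log_type', 'author',
                          '--crawler', 'gongzhonghao',
                          '--task', '<task json>',
                          '--env', 'prod'])
print(args)  # prints the parsed Namespace with the four remaining fields
```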
BIN
gongzhonghao/logs/.DS_Store


BIN
gongzhonghao/videos/.DS_Store


BIN
jixiangxingfu/.DS_Store


BIN
jixiangxingfu/jixiangxingfu_recommend/.DS_Store


BIN
jixiangxingfu/videos/.DS_Store


BIN
kuaishou/kuaishou_follow/.DS_Store


BIN
kuaishou/logs/.DS_Store


BIN
main/.DS_Store


BIN
main/main_logs/.DS_Store


BIN
scheduling/logs/.DS_Store


BIN
shipinhao/shipinhao_search/.DS_Store


BIN
suisuiniannianyingfuqi/logs/.DS_Store


BIN
weixinzhishu/logs/.DS_Store


BIN
weixinzhishu/weixinzhishu_hot_search/.DS_Store


BIN
weixinzhishu/weixinzhishu_main/.DS_Store


BIN
xiaoniangao/.DS_Store


BIN
xiaoniangao/logs/.DS_Store


BIN
xiaoniangao/videos/.DS_Store


BIN
xiaoniangao/xiaoniangao_follow/.DS_Store


BIN
xigua/logs/.DS_Store


BIN
xigua/xigua_follow/.DS_Store


BIN
youtube/.DS_Store


BIN
youtube/logs/.DS_Store


BIN
youtube/videos/.DS_Store


BIN
youtube/youtube_follow/.DS_Store


BIN
zhiqingtiantiankan/.DS_Store


BIN
zhiqingtiantiankan/logs/.DS_Store


BIN
zhiqingtiantiankan/zhiqingtiantiankan_recommend/.DS_Store


BIN
zhongmiaoyinxin/.DS_Store


BIN
zhongmiaoyinxin/logs/.DS_Store


BIN
zhongmiaoyinxin/zhongmiaoyinxin_recommend/.DS_Store