wangkun 2 years ago
parent
commit
447a6cb262

+ 125 - 235
xiaoniangao/xiaoniangao_author/xiaoniangao_author_scheduling.py

@@ -7,6 +7,7 @@ import random
 import shutil
 import sys
 import time
+from hashlib import md5
 import requests
 import urllib3
 sys.path.append(os.getcwd())
@@ -14,122 +15,23 @@ from common.common import Common
 from common.scheduling_db import MysqlHelper
 from common.publish import Publish
 from common.feishu import Feishu
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, download_rule
 proxies = {"http": None, "https": None}


 class XiaoniangaoAuthorScheduling:
     platform = "小年糕"
-    # 小程序个人主页视频列表翻页参数
-    next_t = None
-
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)

     # 获取个人主页视频
     @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, p_mid, uid, rule_dict, oss_endpoint, env):
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        next_t = None
         while True:
             url = "https://api.xiaoniangao.cn/profile/list_album"
             headers = {
@@ -144,8 +46,8 @@ class XiaoniangaoAuthorScheduling:
                 "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/654/page-frame.html'
             }
             json_text = {
-                "visited_mid": str(p_mid),
-                "start_t": cls.next_t,
+                "visited_mid": str(user_dict['link']),
+                "start_t": next_t,
                 "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!690x385r/crop/690x385/interlace/1/format/jpg",
                 "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!120x120r/crop/120x120/interlace/1/format/jpg",
                 "limit": 20,
@@ -179,140 +81,140 @@ class XiaoniangaoAuthorScheduling:
             r = requests.post(url=url, headers=headers, json=json_text, proxies=proxies, verify=False)
             if 'data' not in r.text or r.status_code != 200:
                 Common.logger(log_type, crawler).info(f"get_videoList:{r.text}\n")
-                cls.next_t = None
                 return
             elif 'list' not in r.json()['data']:
                 Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
-                cls.next_t = None
                 return
             elif len(r.json()['data']['list']) == 0:
                 Common.logger(log_type, crawler).info(f"没有更多数据啦~\n")
-                cls.next_t = None
                 return
             else:
-                cls.next_t = r.json()["data"]["next_t"]
+                next_t = r.json()["data"]["next_t"]
                 feeds = r.json()["data"]["list"]
                 for i in range(len(feeds)):
-                    # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
-                    xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
-                        .replace("/", "").replace("\r", "").replace("#", "") \
-                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
-                        .replace(":", "").replace("*", "").replace("?", "") \
-                        .replace("?", "").replace('"', "").replace("<", "") \
-                        .replace(">", "").replace("|", "").replace(" ", "") \
-                        .replace('"', '').replace("'", '')
-                    # 随机取一个表情/符号
-                    emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
-                    # 生成最终标题,标题list[表情+title, title+表情]随机取一个
-                    video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
-                    # 视频 ID
-                    video_id = feeds[i].get("vid", "")
-                    # 播放量
-                    play_cnt = feeds[i].get("play_pv", 0)
-                    # 点赞量
-                    like_cnt = feeds[i].get("favor", {}).get("total", 0)
-                    # 评论数
-                    comment_cnt = feeds[i].get("comment_count", 0)
-                    # 分享量
-                    share_cnt = feeds[i].get("share", 0)
-                    # 时长
-                    duration = int(feeds[i].get("du", 0) / 1000)
-                    # 宽和高
-                    video_width = int(feeds[i].get("w", 0))
-                    video_height = int(feeds[i].get("h", 0))
-                    # 发布时间
-                    publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    # 用户名 / 头像
-                    user_name = feeds[i].get("album_user", {}).get("nick", "").strip().replace("\n", "") \
-                        .replace("/", "").replace("快手", "").replace(" ", "") \
-                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                    avatar_url = feeds[i].get("album_user", {}).get("hurl", "")
-                    # 用户 ID
-                    profile_id = feeds[i]["id"]
-                    # 用户 mid
-                    profile_mid = feeds[i]["mid"]
-                    # 视频封面
-                    cover_url = feeds[i].get("url", "")
-                    # 视频播放地址
-                    video_url = feeds[i].get("v_url", "")
+                    try:
+                        # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
+                        xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                            .replace("/", "").replace("\r", "").replace("#", "") \
+                            .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "") \
+                            .replace('"', '').replace("'", '')
+                        # 随机取一个表情/符号
+                        emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
+                        # 生成最终标题,标题list[表情+title, title+表情]随机取一个
+                        video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
+                        # 发布时间
+                        publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        # 用户名 / 头像
+                        user_name = feeds[i].get("album_user", {}).get("nick", "").strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
 
-                    video_dict = {
-                        "video_id": video_id,
-                        "video_title": video_title,
-                        "duration": duration,
-                        "play_cnt": play_cnt,
-                        "like_cnt": like_cnt,
-                        "comment_cnt": comment_cnt,
-                        "share_cnt": share_cnt,
-                        "user_name": user_name,
-                        "publish_time_stamp": publish_time_stamp,
-                        "publish_time_str": publish_time_str,
-                        "video_width": video_width,
-                        "video_height": video_height,
-                        "avatar_url": avatar_url,
-                        "profile_id": profile_id,
-                        "profile_mid": profile_mid,
-                        "cover_url": cover_url,
-                        "video_url": video_url,
-                        "session": f"xiaoniangao-author-{int(time.time())}"
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
-                        cls.next_t = None
-                        return
+                        video_dict = {
+                            "video_id": feeds[i].get("vid", ""),
+                            "video_title": video_title,
+                            "duration": int(feeds[i].get("du", 0) / 1000),
+                            "play_cnt": feeds[i].get("play_pv", 0),
+                            "like_cnt": feeds[i].get("favor", {}).get("total", 0),
+                            "comment_cnt": feeds[i].get("comment_count", 0),
+                            "share_cnt": feeds[i].get("share", 0),
+                            "user_name": user_name,
+                            "publish_time_stamp": publish_time_stamp,
+                            "publish_time_str": publish_time_str,
+                            "video_width": int(feeds[i].get("w", 0)),
+                            "video_height": int(feeds[i].get("h", 0)),
+                            "avatar_url": feeds[i].get("album_user", {}).get("hurl", ""),
+                            "profile_id": feeds[i]["id"],
+                            "profile_mid": feeds[i]["mid"],
+                            "cover_url": feeds[i].get("url", ""),
+                            "video_url": feeds[i].get("v_url", ""),
+                            "session": f"xiaoniangao-author-{int(time.time())}"
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
 
-                    # 过滤无效视频
-                    if video_title == "" or video_id == "" or video_url == "":
-                        Common.logger(log_type, crawler).info("无效视频\n")
+                        # 过滤无效视频
+                        if video_title == "" or video_dict["video_id"] == "" or video_dict["video_url"] == "":
+                            Common.logger(log_type, crawler).info("无效视频\n")
                         # 抓取基础规则过滤
-                    elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-                    # 过滤词
-                    elif any(str(word) if str(word) in video_title else False for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")) is True:
-                        Common.logger(log_type, crawler).info("视频已中过滤词\n")
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             strategy=strategy,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             uid=uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        elif any(str(word) if str(word) in video_dict["video_title"] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info('已中过滤词\n')
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info('视频已下载\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, uid, oss_endpoint, env):
-        # 下载封面
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
         # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
         # 保存视频信息至 "./videos/{download_video_title}/info.txt"
         Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)

         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid=uid,
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
         if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
         else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                         out_user_id,
@@ -331,7 +233,7 @@ class XiaoniangaoAuthorScheduling:
                                         values({our_video_id},
                                         "{video_dict['profile_id']}",
                                         "{cls.platform}",
-                                        "定向爬虫策略",
+                                        "定向抓取策略",
                                         "{video_dict['video_id']}",
                                         "{video_dict['video_title']}",
                                         "{video_dict['cover_url']}",
@@ -344,7 +246,7 @@ class XiaoniangaoAuthorScheduling:
                                         {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "Wu0CeL", "ROWS", 1, 2)
@@ -374,29 +276,17 @@ class XiaoniangaoAuthorScheduling:
 
     # 获取所有关注列表的用户视频
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, user_list, rule_dict, strategy, oss_endpoint, env):
-        if len(user_list) == 0:
-            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
-            return
-        for user in user_list:
-            # Common.logger(log_type, crawler).info(f"user:{user}")
-            try:
-                user_name = user['nick_name']
-                profile_mid = user['link']
-                uid = user['uid']
-                Common.logger(log_type, crawler).info(f"获取 {user_name} 主页视频")
-                cls.get_videoList(log_type=log_type,
-                                  crawler=crawler,
-                                  strategy=strategy,
-                                  p_mid=profile_mid,
-                                  rule_dict=rule_dict,
-                                  uid=uid,
-                                  oss_endpoint=oss_endpoint,
-                                  env=env)
-                cls.next_t = None
-                time.sleep(1)
-            except Exception as e:
-                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
+        for user_dict in user_list:
+            # try:
+            Common.logger(log_type, crawler).info(f"获取 {user_dict['nick_name']} 主页视频")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              rule_dict=rule_dict,
+                              user_dict=user_dict,
+                              env=env)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"抓取{user_dict['nick_name']}主页时异常:{e}\n")
 
 
 if __name__ == "__main__":
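Both schedulers drop their in-class `download_rule` staticmethod and instead import a shared `download_rule` from `common.public`. That helper's source is not part of this diff; judging from the removed in-class version above, it presumably reduces to a threshold check along these lines (a minimal sketch under that assumption, not the actual `common.public` implementation — the real helper also logs each comparison via `Common.logger`):

```python
import time


def download_rule(log_type, crawler, video_dict, rule_dict):
    """Sketch of the shared threshold check, reconstructed from the removed
    in-class version; log_type/crawler are used for logging in the real helper
    and are unused here."""

    def bounds(key):
        # In the removed code, a max of 0 means "no upper limit".
        lo = int(rule_dict.get(key, {}).get('min', 0))
        hi = int(rule_dict.get(key, {}).get('max', 100000000))
        return lo, (100000000 if hi == 0 else hi)

    duration_min, duration_max = bounds('duration')
    play_min, play_max = bounds('play_cnt')
    like_min, like_max = bounds('like_cnt')
    comment_min, comment_max = bounds('comment_cnt')
    share_min, share_max = bounds('share_cnt')
    width_min, width_max = bounds('width')
    height_min, height_max = bounds('height')
    period_min = int(rule_dict.get('period', {}).get('min', 0))

    return (duration_max >= int(float(video_dict['duration'])) >= duration_min
            and play_max >= int(video_dict['play_cnt']) >= play_min
            and int(time.time()) - int(video_dict['publish_time_stamp']) <= 3600 * 24 * period_min
            and like_max >= int(video_dict['like_cnt']) >= like_min
            and comment_max >= int(video_dict['comment_cnt']) >= comment_min
            and share_max >= int(video_dict['share_cnt']) >= share_min
            and width_max >= int(video_dict['video_width']) >= width_min
            and height_max >= int(video_dict['video_height']) >= height_min)
```

Centralising the check this way lets the author and hour schedulers apply identical min/max rules from `rule_dict` instead of each carrying its own copy.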

+ 238 - 353
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour_scheduling.py

@@ -8,6 +8,8 @@ import random
 import shutil
 import sys
 import time
+from hashlib import md5
+
 import requests
 import urllib3
 sys.path.append(os.getcwd())
@@ -15,130 +17,19 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, download_rule
+
 proxies = {"http": None, "https": None}


 class XiaoniangaoHourScheduling:
     platform = "小年糕"
     words = "abcdefghijklmnopqrstuvwxyz0123456789"
-    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
-    token = "".join(random.sample(words, 32))
     uid_token_dict = {
-        "uid": uid,
-        "token": token
+        "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
+        "token": "".join(random.sample(words, 32))
     }
 
-    # 生成 uid、token
-    @classmethod
-    def get_uid_token(cls):
-        words = "abcdefghijklmnopqrstuvwxyz0123456789"
-        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
-        token = "".join(random.sample(words, 32))
-        uid_token_dict = {
-            "uid": uid,
-            "token": token
-        }
-        return uid_token_dict
-
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
-
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
@@ -236,116 +127,99 @@ class XiaoniangaoHourScheduling:
             # 视频列表数据
             feeds = r.json()["data"]["list"]
             for i in range(len(feeds)):
-                # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
-                xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
-                        .replace("/", "").replace("\r", "").replace("#", "") \
-                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
-                        .replace(":", "").replace("*", "").replace("?", "") \
-                        .replace("?", "").replace('"', "").replace("<", "") \
-                        .replace(">", "").replace("|", "").replace(" ", "")\
-                        .replace('"', '').replace("'", '')
-                # 随机取一个表情/符号
-                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
-                # 生成最终标题,标题list[表情+title, title+表情]随机取一个
-                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
-                # 视频 ID
-                video_id = feeds[i].get("vid", "")
-                # 播放量
-                play_cnt = feeds[i].get("play_pv", 0)
-                # 点赞量
-                like_cnt = feeds[i].get("favor", {}).get("total", 0)
-                # 评论数
-                comment_cnt = feeds[i].get("comment_count", 0)
-                # 分享量
-                share_cnt = feeds[i].get("share", 0)
-                # 时长
-                duration = int(feeds[i].get("du", 0)/1000)
-                # 宽和高
-                video_width = int(feeds[i].get("w", 0))
-                video_height = int(feeds[i].get("h", 0))
-                # 发布时间
-                publish_time_stamp = int(int(feeds[i].get("t", 0))/1000)
-                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                # 用户名 / 头像
-                user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
-                        .replace("/", "").replace("快手", "").replace(" ", "") \
-                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                avatar_url = feeds[i].get("user", {}).get("hurl", "")
-                # 用户 ID
-                profile_id = feeds[i]["id"]
-                # 用户 mid
-                profile_mid = feeds[i]["user"]["mid"]
-                # 视频封面
-                cover_url = feeds[i].get("url", "")
-                # 视频播放地址
-                video_url = feeds[i].get("v_url", "")
-
-                video_dict = {
-                    "video_title": video_title,
-                    "video_id": video_id,
-                    "duration": duration,
-                    "play_cnt": play_cnt,
-                    "like_cnt": like_cnt,
-                    "comment_cnt": comment_cnt,
-                    "share_cnt": share_cnt,
-                    "user_name": user_name,
-                    "publish_time_stamp": publish_time_stamp,
-                    "publish_time_str": publish_time_str,
-                    "video_width": video_width,
-                    "video_height": video_height,
-                    "avatar_url": avatar_url,
-                    "profile_id": profile_id,
-                    "profile_mid": profile_mid,
-                    "cover_url": cover_url,
-                    "video_url": video_url,
-                    "session": f"xiaoniangao-hour-{int(time.time())}"
-                }
-                for k, v in video_dict.items():
-                    Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                # 过滤无效视频
-                if video_title == "" or video_id == "" or video_url == "":
-                    Common.logger(log_type, crawler).warning("无效视频\n")
-                # 抓取基础规则过滤
-                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                    Common.logger(log_type, crawler).info('视频已下载\n')
-                # 过滤敏感词
-                elif any(str(word) if str(word) in video_title else False for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")) is True:
-                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
-                else:
-                    # 写入飞书小时级feeds数据库表
-                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
-                    profile_mid,
-                    platform,
-                    out_video_id,
-                    video_title,
-                    user_name,
-                    cover_url,
-                    video_url,
-                    duration,
-                    publish_time,
-                    play_cnt,
-                    crawler_time_stamp,
-                    crawler_time)
-                    values({profile_id},
-                    {profile_mid},
-                    "{cls.platform}",
-                    "{video_id}",
-                    "{video_title}",
-                    "{user_name}",
-                    "{cover_url}",
-                    "{video_url}",
-                    {duration},
-                    "{publish_time_str}",
-                    {play_cnt},
-                    {int(time.time())},
-                    "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
-                    )"""
-                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+                try:
+                    # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
+                    xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                            .replace("/", "").replace("\r", "").replace("#", "") \
+                            .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "")\
+                            .replace('"', '').replace("'", '')
+                    # 随机取一个表情/符号
+                    emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
+                    # 生成最终标题,标题list[表情+title, title+表情]随机取一个
+                    video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
+                    # 发布时间
+                    publish_time_stamp = int(int(feeds[i].get("t", 0))/1000)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # 用户名 / 头像
+                    user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": feeds[i].get("vid", ""),
+                        "duration": int(feeds[i].get("du", 0)/1000),
+                        "play_cnt": feeds[i].get("play_pv", 0),
+                        "like_cnt": feeds[i].get("favor", {}).get("total", 0),
+                        "comment_cnt": feeds[i].get("comment_count", 0),
+                        "share_cnt": feeds[i].get("share", 0),
+                        "user_name": user_name,
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "video_width": int(feeds[i].get("w", 0)),
+                        "video_height": int(feeds[i].get("h", 0)),
+                        "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
+                        "profile_id": feeds[i]["id"],
+                        "profile_mid": feeds[i]["user"]["mid"],
+                        "cover_url": feeds[i].get("url", ""),
+                        "video_url": feeds[i].get("v_url", ""),
+                        "session": f"xiaoniangao-hour-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    # 过滤无效视频
+                    if video_title == "" or video_dict["video_id"] == "" or video_dict["video_url"] == "":
+                        Common.logger(log_type, crawler).warning("无效视频\n")
+                    # 抓取基础规则过滤
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) if str(word) in video_dict["video_title"] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        # 写入飞书小时级feeds数据库表
+                        insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
+                        profile_mid,
+                        platform,
+                        out_video_id,
+                        video_title,
+                        user_name,
+                        cover_url,
+                        video_url,
+                        duration,
+                        publish_time,
+                        play_cnt,
+                        crawler_time_stamp,
+                        crawler_time)
+                        values({video_dict["profile_id"]},
+                        {video_dict["profile_mid"]},
+                        "{cls.platform}",
+                        "{video_dict["video_id"]}",
+                        "{video_title}",
+                        "{user_name}",
+                        "{video_dict["cover_url"]}",
+                        "{video_dict["video_url"]}",
+                        {video_dict["duration"]},
+                        "{publish_time_str}",
+                        {video_dict["play_cnt"]},
+                        {int(time.time())},
+                        "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
+                        )"""
+                        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                        Common.logger(log_type, crawler).info('视频信息写入小时级数据库成功!\n')
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
 
     @classmethod
     def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
@@ -444,150 +318,166 @@ class XiaoniangaoHourScheduling:
 
     # 更新小时榜数据
     @classmethod
-    def update_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
+    def update_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
         """
         更新小时榜数据
         """
         befor_yesterday = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
         update_time_stamp = int(time.mktime(time.strptime(befor_yesterday, "%Y-%m-%d %H:%M:%S")))
-        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} GROUP BY out_video_id """
+        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp>={update_time_stamp} GROUP BY out_video_id DESC """
         update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
         if len(update_video_list) == 0:
             Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
             return
         for update_video_info in update_video_list:
-            profile_id = update_video_info["profile_id"]
-            profile_mid = update_video_info["profile_mid"]
-            video_title = update_video_info["video_title"]
-            video_id = update_video_info["out_video_id"]
-            if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <= 10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                     crawler=crawler,
-                                                     p_id=profile_id,
-                                                     p_mid=profile_mid,
-                                                     v_title=video_title,
-                                                     v_id=video_id)
-                ten_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type=log_type,
-                                     crawler=crawler,
-                                     video_info_dict=video_info_dict,
-                                     rule_dict=rule_dict,
-                                     update_video_info=update_video_info,
-                                     strategy=strategy,
-                                     oss_endpoint=oss_endpoint,
-                                     env=env)
-            elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <= 10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                     crawler=crawler,
-                                                     p_id=profile_id,
-                                                     p_mid=profile_mid,
-                                                     v_title=video_title,
-                                                     v_id=video_id)
-                fifteen_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"fifteen_play_cnt:{fifteen_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set fifteen_play_cnt={fifteen_play_cnt} WHERE out_video_id="{video_id}"; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type=log_type,
-                                     crawler=crawler,
-                                     video_info_dict=video_info_dict,
-                                     rule_dict=rule_dict,
-                                     update_video_info=update_video_info,
-                                     strategy=strategy,
-                                     oss_endpoint=oss_endpoint,
-                                     env=env)
-            elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <= 10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                     crawler=crawler,
-                                                     p_id=profile_id,
-                                                     p_mid=profile_mid,
-                                                     v_title=video_title,
-                                                     v_id=video_id)
-                twenty_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"twenty_play_cnt:{twenty_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set twenty_play_cnt={twenty_play_cnt} WHERE out_video_id="{video_id}"; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type=log_type,
-                                     crawler=crawler,
-                                     video_info_dict=video_info_dict,
-                                     rule_dict=rule_dict,
-                                     update_video_info=update_video_info,
-                                     strategy=strategy,
-                                     oss_endpoint=oss_endpoint,
-                                     env=env)
-            else:
-                pass
+            try:
+                profile_id = update_video_info["profile_id"]
+                profile_mid = update_video_info["profile_mid"]
+                video_title = update_video_info["video_title"]
+                video_id = update_video_info["out_video_id"]
+                if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <= 10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                         crawler=crawler,
+                                                         p_id=profile_id,
+                                                         p_mid=profile_mid,
+                                                         v_title=video_title,
+                                                         v_id=video_id)
+                    ten_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         video_info_dict=video_info_dict,
+                                         rule_dict=rule_dict,
+                                         update_video_info=update_video_info,
+                                         our_uid=our_uid,
+                                         env=env)
+                elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <= 10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                         crawler=crawler,
+                                                         p_id=profile_id,
+                                                         p_mid=profile_mid,
+                                                         v_title=video_title,
+                                                         v_id=video_id)
+                    fifteen_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"fifteen_play_cnt:{fifteen_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set fifteen_play_cnt={fifteen_play_cnt} WHERE out_video_id="{video_id}"; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         video_info_dict=video_info_dict,
+                                         rule_dict=rule_dict,
+                                         update_video_info=update_video_info,
+                                         our_uid=our_uid,
+                                         env=env)
+                elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <= 10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                         crawler=crawler,
+                                                         p_id=profile_id,
+                                                         p_mid=profile_mid,
+                                                         v_title=video_title,
+                                                         v_id=video_id)
+                    twenty_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"twenty_play_cnt:{twenty_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set twenty_play_cnt={twenty_play_cnt} WHERE out_video_id="{video_id}"; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         video_info_dict=video_info_dict,
+                                         rule_dict=rule_dict,
+                                         update_video_info=update_video_info,
+                                         our_uid=our_uid,
+                                         env=env)
+                else:
+                    pass
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f'更新{update_video_info["video_title"]}时异常:{e}\n')
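The 10:00, 15:00 and 20:00 branches in the hunk above are identical except for which counter column (ten_play_cnt / fifteen_play_cnt / twenty_play_cnt) gets refreshed. A minimal sketch of how that dispatch could be expressed, assuming the same column names; the helper and the mapping are illustrative and not part of this commit:

    # Illustrative sketch only -- not part of the diff above.
    HOUR_COLUMN = {10: "ten_play_cnt", 15: "fifteen_play_cnt", 20: "twenty_play_cnt"}

    def column_for_now(now):
        # Returns the counter column to refresh, or None outside the three
        # ten-minute windows (xx:00 - xx:10) handled by the branches above.
        if now.minute <= 10 and now.hour in HOUR_COLUMN:
            return HOUR_COLUMN[now.hour]
        return None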
 
 
     @classmethod
-    def download(cls, log_type, crawler, video_info_dict, rule_dict, strategy, oss_endpoint, env):
-        # 下载封面
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
-                               url=video_info_dict["cover_url"])
+    def download(cls, log_type, crawler, video_info_dict, rule_dict, our_uid, env):
         # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"],
                                url=video_info_dict["video_url"])
+        md_title = md5(video_info_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
+                               url=video_info_dict["cover_url"])
         # 保存视频信息至 "./videos/{download_video_title}/info.txt"
         Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid="hour",
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
         if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="上升榜抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
         else:
         else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="上升榜抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
 
 
         if our_video_id is None:
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
-            return
-
-        # # 视频信息保存数据库
-        # rule_dict = {
-        #     "duration": {"min": 40},
-        #     "play_cnt": {"min": 4000},
-        #     "publish_day": {"min": 10}
-        # }
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
 
         insert_sql = f""" insert into crawler_video(video_id,
         insert_sql = f""" insert into crawler_video(video_id,
-                                                        out_user_id,
-                                                        platform,
-                                                        strategy,
-                                                        out_video_id,
-                                                        video_title,
-                                                        cover_url,
-                                                        video_url,
-                                                        duration,
-                                                        publish_time,
-                                                        play_cnt,
-                                                        crawler_rule,
-                                                        width,
-                                                        height)
-                                                        values({our_video_id},
-                                                        "{video_info_dict['profile_id']}",
-                                                        "{cls.platform}",
-                                                        "小时榜爬虫策略",
-                                                        "{video_info_dict['video_id']}",
-                                                        "{video_info_dict['video_title']}",
-                                                        "{video_info_dict['cover_url']}",
-                                                        "{video_info_dict['video_url']}",
-                                                        {int(video_info_dict['duration'])},
-                                                        "{video_info_dict['publish_time_str']}",
-                                                        {int(video_info_dict['play_cnt'])},
-                                                        '{json.dumps(rule_dict)}',
-                                                        {int(video_info_dict['video_width'])},
-                                                        {int(video_info_dict['video_height'])}) """
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_info_dict['profile_id']}",
+                                                    "{cls.platform}",
+                                                    "上升榜抓取策略",
+                                                    "{video_info_dict['video_id']}",
+                                                    "{video_info_dict['video_title']}",
+                                                    "{video_info_dict['cover_url']}",
+                                                    "{video_info_dict['video_url']}",
+                                                    {int(video_info_dict['duration'])},
+                                                    "{video_info_dict['publish_time_str']}",
+                                                    {int(video_info_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_info_dict['video_width'])},
+                                                    {int(video_info_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
         Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
         Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
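The download() changes above come down to two things: the per-video folder is now keyed by the md5 of the title, and a zero-byte or missing video.mp4 aborts the flow before the cover is fetched or anything is published. A self-contained sketch of that validity check, assuming the same ./{crawler}/videos/ layout; the helper name is illustrative and not part of this commit:

    import os
    import shutil
    from hashlib import md5

    def downloaded_video_is_valid(crawler, video_title):
        # The folder name is the md5 hex digest of the title, as in the hunk above.
        md_title = md5(video_title.encode("utf8")).hexdigest()
        path = f"./{crawler}/videos/{md_title}"
        try:
            if os.path.getsize(f"{path}/video.mp4") == 0:
                shutil.rmtree(path, ignore_errors=True)  # zero-byte download, drop the folder
                return False
        except FileNotFoundError:
            shutil.rmtree(path, ignore_errors=True)      # the file was never written
            return False
        return True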
@@ -597,7 +487,7 @@ class XiaoniangaoHourScheduling:
         # 视频ID工作表,首行写入数据
         upload_time = int(time.time())
         values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                   "小时级上升榜",
+                   "上升榜抓取策略",
                    str(video_info_dict['video_id']),
                    str(video_info_dict['video_title']),
                    our_video_link,
@@ -620,7 +510,7 @@ class XiaoniangaoHourScheduling:
 
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_info_dict, rule_dict, update_video_info, strategy, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_info_dict, rule_dict, update_video_info, our_uid, env):
         if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
             Common.logger(log_type, crawler).info('视频已下载\n')
         # 播放量大于 50000,直接下载
@@ -631,8 +521,7 @@ class XiaoniangaoHourScheduling:
                          crawler=crawler,
                          video_info_dict=video_info_dict,
                          rule_dict=rule_dict,
-                         strategy=strategy,
-                         oss_endpoint=oss_endpoint,
+                         our_uid=our_uid,
                          env=env)
 
         # 上升榜判断逻辑,任意时间段上升量>=5000,连续两个时间段上升量>=2000
@@ -645,8 +534,7 @@ class XiaoniangaoHourScheduling:
                          crawler=crawler,
                          video_info_dict=video_info_dict,
                          rule_dict=rule_dict,
-                         strategy=strategy,
-                         oss_endpoint=oss_endpoint,
+                         our_uid=our_uid,
                          env=env)
 
         elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['fifteen_play_cnt']) >= 1000:
@@ -657,8 +545,7 @@ class XiaoniangaoHourScheduling:
                          crawler=crawler,
                          video_info_dict=video_info_dict,
                          rule_dict=rule_dict,
-                         strategy=strategy,
-                         oss_endpoint=oss_endpoint,
+                         our_uid=our_uid,
                          env=env)
 
         elif int(update_video_info['fifteen_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
@@ -669,8 +556,7 @@ class XiaoniangaoHourScheduling:
                          crawler=crawler,
                          video_info_dict=video_info_dict,
                          rule_dict=rule_dict,
-                         strategy=strategy,
-                         oss_endpoint=oss_endpoint,
+                         our_uid=our_uid,
                          env=env)
 
         elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
@@ -681,8 +567,7 @@ class XiaoniangaoHourScheduling:
                          crawler=crawler,
                          video_info_dict=video_info_dict,
                          rule_dict=rule_dict,
-                         strategy=strategy,
-                         oss_endpoint=oss_endpoint,
+                         our_uid=our_uid,
                          env=env)
 
         else:

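Apart from the direct-download case, every elif branch above reduces to the same condition: two of the three hourly counters have reached 1000. A compact predicate equivalent to those branches, with the threshold taken from the diff; the helper name is illustrative and not part of this commit:

    def two_windows_reached(update_video_info, threshold=1000):
        counters = ("ten_play_cnt", "fifteen_play_cnt", "twenty_play_cnt")
        hits = sum(int(update_video_info[k]) >= threshold for k in counters)
        # Each pairwise combination in the elif chain is covered by "at least two hits".
        return hits >= 2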
+ 7 - 11
xiaoniangao/xiaoniangao_main/run_xiaoniangao_author_scheduling.py

@@ -11,7 +11,7 @@ from common.scheduling_db import MysqlHelper
 from xiaoniangao.xiaoniangao_author.xiaoniangao_author_scheduling import XiaoniangaoAuthorScheduling
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
     task_id = task_dict['task_id']
@@ -21,13 +21,11 @@ def main(log_type, crawler, task, oss_endpoint, env):
     Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
     Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
     Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
     Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
     Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
     Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
-    XiaoniangaoAuthorScheduling.get_follow_videos(log_type=log_type,
-                                        crawler=crawler,
-                                        user_list=user_list,
-                                        rule_dict=rule_dict,
-                                        strategy="定向榜爬虫策略",
-                                        oss_endpoint=oss_endpoint,
-                                        env=env)
+    XiaoniangaoAuthorScheduling.get_author_videos(log_type=log_type,
+                                                  crawler=crawler,
+                                                  user_list=user_list,
+                                                  rule_dict=rule_dict,
+                                                  env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取完一轮\n')
 
@@ -37,11 +35,9 @@ if __name__ == "__main__":
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
     parser.add_argument('--crawler')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
     parser.add_argument('--env')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env)
+         env=args.env)

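With --oss_endpoint removed, the author runner now takes four arguments. A typical invocation would look like the command below; the flag names come from the argparse block above, while the values are placeholders (the real task payload is produced by the scheduler and is not shown in this diff):

    python xiaoniangao/xiaoniangao_main/run_xiaoniangao_author_scheduling.py --log_type="author" --crawler="xiaoniangao" --task="<task-json>" --env="dev"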
+ 21 - 11
xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour_scheduling.py

@@ -4,29 +4,42 @@
 import argparse
 import datetime
 import os
+import random
 import sys
-
 sys.path.append(os.getcwd())
+from common.scheduling_db import MysqlHelper
 from common.common import Common
 from common.public import task_fun
 from xiaoniangao.xiaoniangao_hour.xiaoniangao_hour_scheduling import XiaoniangaoHourScheduling
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    our_uid_list = []
+    for user in user_list:
+        our_uid_list.append(user["uid"])
+    our_uid = random.choice(our_uid_list)
     Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
     Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
     Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
     Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
     # 获取符合规则的视频,写入小时级数据_feeds
-    XiaoniangaoHourScheduling.get_videoList(log_type, crawler, rule_dict, env)
+    for i in range(1, 101):
+        try:
+            Common.logger(log_type, crawler).info(f"正在抓取第{i}页")
+            XiaoniangaoHourScheduling.get_videoList(log_type, crawler, rule_dict, env)
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"抓取第{i}页时异常:{e}\n")
     now = datetime.datetime.now()
     if now.hour == 10 and 0 <= now.minute <= 10:
         Common.logger(log_type, crawler).info("开始更新/下载上升榜")
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
                                                    crawler=crawler,
                                                    rule_dict=rule_dict,
-                                                   strategy="小时榜爬虫策略",
-                                                   oss_endpoint=oss_endpoint,
+                                                   our_uid=our_uid,
                                                    env=env)
 
     elif now.hour == 15 and now.minute <= 10:
@@ -34,8 +47,7 @@ def main(log_type, crawler, task, oss_endpoint, env):
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
                                                    crawler=crawler,
                                                    rule_dict=rule_dict,
-                                                   strategy="小时榜爬虫策略",
-                                                   oss_endpoint=oss_endpoint,
+                                                   our_uid=our_uid,
                                                    env=env)
 
     elif now.hour == 20 and now.minute <= 10:
@@ -43,10 +55,10 @@ def main(log_type, crawler, task, oss_endpoint, env):
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
                                                    crawler=crawler,
                                                    rule_dict=rule_dict,
-                                                   strategy="小时榜爬虫策略",
-                                                   oss_endpoint=oss_endpoint,
+                                                   our_uid=our_uid,
                                                    env=env)
     Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info("抓取完一轮\n")
 
 
 
 
 if __name__ == "__main__":
@@ -54,11 +66,9 @@ if __name__ == "__main__":
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
     parser.add_argument('--crawler')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
     parser.add_argument('--env')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
          env=args.env)

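One caveat with the new per-task uid selection: random.choice raises IndexError when the task has no users bound in crawler_user_v3, so an empty user list would abort the run before any page is fetched. A guarded variant, assuming the same user_list rows as the query above; the helper name is illustrative and not part of this commit:

    import random

    def pick_our_uid(user_list):
        # user_list holds the crawler_user_v3 rows selected for this task_id.
        uids = [user["uid"] for user in user_list]
        if not uids:
            return None  # let the caller skip the update round instead of crashing
        return random.choice(uids)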
+ 4 - 4
xigua/xigua_search/xigua_search_new.py

@@ -722,10 +722,10 @@ class XiguaSearchNew:
                         for k, v in video_dict.items():
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
                         rule_dict = cls.get_rule_dict(log_type, crawler)
-                        if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict["publish_time"]):
-                            Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict["publish_time"])}天\n')
-                            driver.quit()
-                            return
+                        # if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict["publish_time"]):
+                        #     Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict["publish_time"])}天\n')
+                        #     driver.quit()
+                        #     return
                         if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                             Common.logger(log_type, crawler).info("不满足抓取规则\n")
                         elif any(str(word) if str(word) in video_dict["video_title"] else False for word in cls.filter_words(log_type, crawler, env)) is True: