wangkun 2 years ago
parent
commit
d9549914ba

+ 1 - 0
README.MD

@@ -116,6 +116,7 @@ nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_3.py >> xiaoniangao
 ps aux | grep run_xiaoniangao_follow
 ps aux | grep run_xiaoniangao_hour
 ps aux | grep run_xiaoniangao_play
+ps aux | grep run_xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9 
 ps aux | grep run_xiaoniangao_follow | grep -v grep | awk '{print $2}' | xargs kill -9 
 ps aux | grep run_xiaoniangao_hour | grep -v grep | awk '{print $2}' | xargs kill -9 
 ps aux | grep run_xiaoniangao_play | grep -v grep | awk '{print $2}' | xargs kill -9 
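
Note: the new catch-all pattern run_xiaoniangao also matches the three more specific patterns beneath it, so once it runs the follow/hour/play lines find nothing left to kill; "grep -v grep" keeps the grep process itself out of the PID list. For reference, a minimal Python sketch of the same pipeline (stdlib only, assuming a Linux host with ps aux; the process name is taken from the README):

import os
import signal
import subprocess

def kill_matching(pattern: str) -> None:
    # List all processes, like `ps aux`.
    out = subprocess.run(["ps", "aux"], capture_output=True, text=True).stdout
    for line in out.splitlines():
        # Keep matching lines, excluding grep itself (`grep -v grep`).
        if pattern not in line or "grep" in line:
            continue
        pid = int(line.split()[1])    # PID is column 2, like `awk '{print $2}'`
        os.kill(pid, signal.SIGKILL)  # like `kill -9`

kill_matching("run_xiaoniangao")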

+ 0 - 2
common/public.py

@@ -81,14 +81,12 @@ def random_title(log_type, crawler, env, text):
 def task_fun(task_str):
     task_str = task_str.replace("'[{", '[{').replace("}}]'", '}}]')
     task_dict = dict(eval(task_str))
-
     rule = task_dict['rule']
     task_dict['rule'] = dict()
     for item in rule:
         for k, val in item.items():
             task_dict['rule'][k] = val
     rule_dict = task_dict['rule']
-
     task_dict = {
         "task_dict": task_dict,
         "rule_dict": rule_dict

+ 6 - 7
scheduling/scheduling_v3/crawler_scheduling_v3.py

@@ -4,7 +4,6 @@
 import os
 import sys
 import time
-
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.scheduling_db import MysqlHelper, RedisHelper
@@ -46,17 +45,17 @@ class SchedulingV3:
                 task_id = pre_task['id']
                 if machine == "hk":
                     # 写入 redis
-                    task_key = 'crawler_config_task_queue:hk'
+                    task_key = 'crawler_config_task_queue'
                     RedisHelper.redis_push(env, task_key, str(pre_task))
                     Common.logger(log_type, crawler).info(f"写入Redis成功:{str(pre_task)}")
                 elif machine == "aliyun":
                     # 写入 redis
-                    task_key = 'crawler_config_task_queue:aliyun'
+                    task_key = 'crawler_config_task_queue'
                     RedisHelper.redis_push(env, task_key, str(pre_task))
                     Common.logger(log_type, crawler).info(f"写入Redis成功:{str(pre_task)}")
                 else:
                     # 写入 redis
-                    task_key = 'crawler_config_task_queue:dev'
+                    task_key = 'crawler_config_task_queue'
                     RedisHelper.redis_push(env, task_key, str(pre_task))
                     Common.logger(log_type, crawler).info(f"写入Redis成功:{str(pre_task)}")
                 if int(time.time()*1000) >= next_time:
@@ -65,11 +64,11 @@ class SchedulingV3:
     @classmethod
     def get_redis(cls, log_type, crawler, env):
         if env == 'hk':
-            task_key = 'crawler_config_task_queue:hk'
+            task_key = 'crawler_config_task_queue'
         elif env == 'prod':
-            task_key = 'crawler_config_task_queue:aliyun'
+            task_key = 'crawler_config_task_queue'
         else:
-            task_key = 'crawler_config_task_queue:dev'
+            task_key = 'crawler_config_task_queue'
 
         redis_data = RedisHelper.redis_pop(env, task_key)
         if redis_data is None or len(redis_data) == 0:
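
Note: after this change the hk / aliyun / dev branches all use the same key and differ only in their log text, so the branching in both methods can collapse to a constant. A sketch reusing the module's own imports (RedisHelper's signatures are taken from the calls above):

from common.scheduling_db import RedisHelper

TASK_KEY = 'crawler_config_task_queue'  # now shared by every machine/env

def push_task(env, pre_task):
    # env still selects the Redis instance inside RedisHelper; the key is constant.
    RedisHelper.redis_push(env, TASK_KEY, str(pre_task))

def pop_task(env):
    return RedisHelper.redis_pop(env, TASK_KEY)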

+ 1 - 1
scheduling/scheduling_v3/scheduling_v3.sh

@@ -14,7 +14,7 @@ oss_endpoint=$5 # OSS网关,内网: inner / 外网: out / 香港: hk
 env=$6          # 爬虫运行环境,正式环境: prod / 测试环境: dev
 nohup_dir=$7    # nohup日志存储路径,如: ./youtube/nohup.log
 
-echo "开始"
+echo "$(date "+%Y-%m-%d %H:%M:%S") 开始调度爬虫任务"
 echo "crawler_dir:"${crawler_dir}
 echo "log_type:"${log_type}
 echo "crawler:"${crawler}

BIN
xiaoniangao/.DS_Store


BIN
xiaoniangao/videos/.DS_Store


+ 19 - 0
xiaoniangao/xiaoniangao_author/author_test.py

@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/21
+from common.scheduling_db import MysqlHelper
+
+
+class AuthorTest:
+    @classmethod
+    def get_users(cls, log_type, crawler, env):
+        select_user_sql = f"""select * from crawler_user_v3 where task_id=16"""
+        user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+        print(len(user_list))
+        for user in user_list:
+            print(type(user))
+            print(user)
+
+
+if __name__ == "__main__":
+    AuthorTest.get_users("test", "xiaoniangao", "dev")
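
Note: the query interpolates its value into the SQL string. That is harmless for a hard-coded test id, but for anything user-supplied, parameter binding is the safer pattern. A sketch with pymysql (connection settings are invented for illustration; MysqlHelper's own API may differ):

import pymysql

conn = pymysql.connect(host="127.0.0.1", user="crawler", password="***",
                       database="crawler", charset="utf8mb4")
with conn.cursor(pymysql.cursors.DictCursor) as cur:
    cur.execute("select * from crawler_user_v3 where task_id=%s", (16,))  # driver escapes the value
    user_list = cur.fetchall()
print(len(user_list))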

File diff suppressed because it is too large
+ 73 - 36
xiaoniangao/xiaoniangao_author/xiaoniangao_author_scheduling.py


+ 125 - 60
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour_scheduling.py

@@ -45,68 +45,85 @@ class XiaoniangaoHourScheduling:
         :param rule_dict: 规则信息,字典格式
         :return: 满足规则,返回 True;反之,返回 False
         """
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
-        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
-        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
-        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
         rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
         rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
         if rule_playCnt_max == 0:
             rule_playCnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
         rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+        #
+        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
+        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
+        # if rule_fans_max == 0:
+        #     rule_fans_max = 100000000
+        #
+        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
+        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
+        # if rule_videos_max == 0:
+        #     rule_videos_max = 100000000
+
         rule_like_min = rule_dict.get('like', {}).get('min', 0)
         rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
         if rule_like_max == 0:
             rule_like_max = 100000000
+
         rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
         rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
         if rule_videoWidth_max == 0:
             rule_videoWidth_max = 100000000
-        rule_videoHeight_min = rule_dict.get('videoWidth', {}).get('min', 0)
-        rule_videoHeight_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
+
+        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
+        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
         if rule_videoHeight_max == 0:
             rule_videoHeight_max = 100000000
-        Common.logger(log_type, crawler).info(f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
-        Common.logger(log_type, crawler).info(f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
-        Common.logger(log_type, crawler).info(f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
-        Common.logger(log_type, crawler).info(f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+
+        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
+        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
+        if rule_shareCnt_max == 0:
+            rule_shareCnt_max = 100000000
+
+        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
+        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
+        if rule_commentCnt_max == 0:
+            rule_commentCnt_max = 100000000
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+
         if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
                 and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min)\
-                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min)\
-                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min)\
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
+                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min) \
+                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min) \
+                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min) \
+                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min) \
                 and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
             return True
         else:
             return False
 
-        # if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min):
-        #     if int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= 0:
-        #         if int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * 365:
-        #             if int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min):
-        #                 if int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min):
-        #                     if int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
-        #                         return True
-        #                     else:
-        #                         return False
-        #                 else:
-        #                     return False
-        #             else:
-        #                 return False
-        #         else:
-        #             return False
-        #     else:
-        #         return False
-        # else:
-        #     return False
-
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
@@ -412,7 +429,7 @@ class XiaoniangaoHourScheduling:
 
     # 更新小时榜数据
     @classmethod
-    def update_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
+    def update_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
         """
         更新小时榜数据
         """
@@ -440,8 +457,14 @@ class XiaoniangaoHourScheduling:
                 update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
                 # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                 MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint,
-                                     env)
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     video_info_dict=video_info_dict,
+                                     rule_dict=rule_dict,
+                                     update_video_info=update_video_info,
+                                     strategy=strategy,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env)
             elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <= 10:
                 video_info_dict = cls.get_video_info(log_type=log_type,
                                                      crawler=crawler,
@@ -454,8 +477,14 @@ class XiaoniangaoHourScheduling:
                 update_sql = f""" update crawler_xiaoniangao_hour set fifteen_play_cnt={fifteen_play_cnt} WHERE out_video_id="{video_id}"; """
                 # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                 MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint,
-                                     env)
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     video_info_dict=video_info_dict,
+                                     rule_dict=rule_dict,
+                                     update_video_info=update_video_info,
+                                     strategy=strategy,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env)
             elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <= 10:
                 video_info_dict = cls.get_video_info(log_type=log_type,
                                                      crawler=crawler,
@@ -468,13 +497,19 @@ class XiaoniangaoHourScheduling:
                 update_sql = f""" update crawler_xiaoniangao_hour set twenty_play_cnt={twenty_play_cnt} WHERE out_video_id="{video_id}"; """
                 # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                 MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint,
-                                     env)
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     video_info_dict=video_info_dict,
+                                     rule_dict=rule_dict,
+                                     update_video_info=update_video_info,
+                                     strategy=strategy,
+                                     oss_endpoint=oss_endpoint,
+                                     env=env)
             else:
                 pass
 
     @classmethod
-    def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
+    def download(cls, log_type, crawler, video_info_dict, rule_dict, strategy, oss_endpoint, env):
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
                                url=video_info_dict["cover_url"])
@@ -503,12 +538,12 @@ class XiaoniangaoHourScheduling:
             shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
             return
 
-        # 视频信息保存数据库
-        rule_dict = {
-            "duration": {"min": 40},
-            "play_cnt": {"min": 4000},
-            "publish_day": {"min": 10}
-        }
+        # # 视频信息保存数据库
+        # rule_dict = {
+        #     "duration": {"min": 40},
+        #     "play_cnt": {"min": 4000},
+        #     "publish_day": {"min": 10}
+        # }
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                         out_user_id,
@@ -570,14 +605,20 @@ class XiaoniangaoHourScheduling:
 
     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, video_info_dict, rule_dict, update_video_info, strategy, oss_endpoint, env):
         if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
             Common.logger(log_type, crawler).info('视频已下载\n')
         # 播放量大于 50000,直接下载
         elif int(video_info_dict["play_cnt"]) >= 50000:
             Common.logger(log_type, crawler).info(
                 f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
-            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+            cls.download(log_type=log_type,
+                         crawler=crawler,
+                         video_info_dict=video_info_dict,
+                         rule_dict=rule_dict,
+                         strategy=strategy,
+                         oss_endpoint=oss_endpoint,
+                         env=env)
 
         # 上升榜判断逻辑,任意时间段上升量>=5000,连续两个时间段上升量>=2000
         elif int(update_video_info['ten_play_cnt']) >= 5000 or int(
@@ -585,25 +626,49 @@ class XiaoniangaoHourScheduling:
             Common.logger(log_type, crawler).info(
                 f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+            cls.download(log_type=log_type,
+                         crawler=crawler,
+                         video_info_dict=video_info_dict,
+                         rule_dict=rule_dict,
+                         strategy=strategy,
+                         oss_endpoint=oss_endpoint,
+                         env=env)
 
         elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
             Common.logger(log_type, crawler).info(
                 f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+            cls.download(log_type=log_type,
+                         crawler=crawler,
+                         video_info_dict=video_info_dict,
+                         rule_dict=rule_dict,
+                         strategy=strategy,
+                         oss_endpoint=oss_endpoint,
+                         env=env)
 
         elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
             Common.logger(log_type, crawler).info(
                 f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+            cls.download(log_type=log_type,
+                         crawler=crawler,
+                         video_info_dict=video_info_dict,
+                         rule_dict=rule_dict,
+                         strategy=strategy,
+                         oss_endpoint=oss_endpoint,
+                         env=env)
 
         elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
             Common.logger(log_type, crawler).info(
                 f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
             Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+            cls.download(log_type=log_type,
+                         crawler=crawler,
+                         video_info_dict=video_info_dict,
+                         rule_dict=rule_dict,
+                         strategy=strategy,
+                         oss_endpoint=oss_endpoint,
+                         env=env)
 
         else:
             Common.logger(log_type, crawler).info("上升量不满足下载规则")

+ 8 - 1
xiaoniangao/xiaoniangao_main/run_xiaoniangao_author_scheduling.py

@@ -7,18 +7,25 @@ import sys
 sys.path.append(os.getcwd())
 from common.public import task_fun
 from common.common import Common
+from common.scheduling_db import MysqlHelper
 from xiaoniangao.xiaoniangao_author.xiaoniangao_author_scheduling import XiaoniangaoAuthorScheduling
 
 
 def main(log_type, crawler, task, oss_endpoint, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
     Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
     Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
     Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
     XiaoniangaoAuthorScheduling.get_follow_videos(log_type=log_type,
                                         crawler=crawler,
-                                        strategy="定向爬虫策略",
+                                        user_list=user_list,
+                                        rule_dict=rule_dict,
+                                        strategy="定向榜爬虫策略",
                                         oss_endpoint=oss_endpoint,
                                         env=env)
     Common.del_logs(log_type, crawler)
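
Note: main() calls task_fun(task) twice to pull out the two dicts, which parses the task string twice. Calling it once is enough; a sketch consistent with the snippet above:

task_info = task_fun(task)
task_dict = task_info['task_dict']
rule_dict = task_info['rule_dict']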

+ 17 - 13
xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour_scheduling.py

@@ -5,6 +5,7 @@ import argparse
 import datetime
 import os
 import sys
+
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.public import task_fun
@@ -22,26 +23,29 @@ def main(log_type, crawler, task, oss_endpoint, env):
     if now.hour == 10 and 0 <= now.minute <= 10:
         Common.logger(log_type, crawler).info("开始更新/下载上升榜")
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy="小时榜爬虫策略",
-                                         oss_endpoint=oss_endpoint,
-                                         env=env)
+                                                   crawler=crawler,
+                                                   rule_dict=rule_dict,
+                                                   strategy="小时榜爬虫策略",
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
 
     elif now.hour == 15 and now.minute <= 10:
         Common.logger(log_type, crawler).info("开始更新/下载上升榜")
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy="小时榜爬虫策略",
-                                         oss_endpoint=oss_endpoint,
-                                         env=env)
+                                                   crawler=crawler,
+                                                   rule_dict=rule_dict,
+                                                   strategy="小时榜爬虫策略",
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
 
     elif now.hour == 20 and now.minute <= 10:
         Common.logger(log_type, crawler).info("开始更新/下载上升榜")
         XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy="小时榜爬虫策略",
-                                         oss_endpoint=oss_endpoint,
-                                         env=env)
+                                                   crawler=crawler,
+                                                   rule_dict=rule_dict,
+                                                   strategy="小时榜爬虫策略",
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
     Common.del_logs(log_type, crawler)
 
 
@@ -57,4 +61,4 @@ if __name__ == "__main__":
          crawler=args.crawler,
          task=args.task,
          oss_endpoint=args.oss_endpoint,
-         env=args.env)
+         env=args.env)
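
Note: the three time windows (10:00, 15:00, 20:00, each with a 10-minute grace period) repeat the same call verbatim; they can collapse into one membership test. A sketch using the file's own imports:

now = datetime.datetime.now()
if now.hour in (10, 15, 20) and now.minute <= 10:
    Common.logger(log_type, crawler).info("开始更新/下载上升榜")
    XiaoniangaoHourScheduling.update_videoList(log_type=log_type,
                                               crawler=crawler,
                                               rule_dict=rule_dict,
                                               strategy="小时榜爬虫策略",
                                               oss_endpoint=oss_endpoint,
                                               env=env)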

+ 45 - 0
xiaoniangao/xiaoniangao_main/run_xiaoniangao_play_scheduling.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/21
+import argparse
+import datetime
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+
+from xiaoniangao.xiaoniangao_play.xiaoniangao_play_scheduling import XiaoniangaoPlayScheduling
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    # 获取符合规则的视频,下载并上传
+    XiaoniangaoPlayScheduling.get_videoList(log_type=log_type,
+                                            crawler=crawler,
+                                            rule_dict=rule_dict,
+                                            strategy="播放量榜爬虫策略",
+                                            oss_endpoint=oss_endpoint,
+                                            env=env)
+
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 新建参数解析器对象
+    parser.add_argument('--log_type', type=str)  # 添加参数,注明参数类型
+    parser.add_argument('--crawler')  # 添加参数
+    parser.add_argument('--task')  # 添加参数
+    parser.add_argument('--oss_endpoint')  # 添加参数
+    parser.add_argument('--env')  # 添加参数
+    args = parser.parse_args()  # 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 398 - 0
xiaoniangao/xiaoniangao_play/xiaoniangao_play_scheduling.py

@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/16
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql
+proxies = {"http": None, "https": None}
+
+
+class XiaoniangaoPlayScheduling:
+    platform = "小年糕"
+
+    # 生成 uid、token
+    @classmethod
+    def get_uid_token(cls):
+        words = "abcdefghijklmnopqrstuvwxyz0123456789"
+        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+        token = "".join(random.sample(words, 32))
+        uid_token_dict = {
+            "uid": uid,
+            "token": token
+        }
+        return uid_token_dict
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        下载视频的基本规则
+        :param log_type: 日志
+        :param crawler: 哪款爬虫
+        :param video_dict: 视频信息,字典格式
+        :param rule_dict: 规则信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
+        rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
+        if rule_playCnt_max == 0:
+            rule_playCnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+        #
+        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
+        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
+        # if rule_fans_max == 0:
+        #     rule_fans_max = 100000000
+        #
+        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
+        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
+        # if rule_videos_max == 0:
+        #     rule_videos_max = 100000000
+
+        rule_like_min = rule_dict.get('like', {}).get('min', 0)
+        rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
+        if rule_like_max == 0:
+            rule_like_max = 100000000
+
+        rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
+        rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
+        if rule_videoWidth_max == 0:
+            rule_videoWidth_max = 100000000
+
+        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
+        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
+        if rule_videoHeight_max == 0:
+            rule_videoHeight_max = 100000000
+
+        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
+        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
+        if rule_shareCnt_max == 0:
+            rule_shareCnt_max = 100000000
+
+        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
+        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
+        if rule_commentCnt_max == 0:
+            rule_commentCnt_max = 100000000
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min) \
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
+                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min) \
+                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min) \
+                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min) \
+                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min) \
+                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min):
+            return True
+        else:
+            return False
+
+    # 获取列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
+        uid_token_dict = cls.get_uid_token()
+        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+        headers = {
+            "x-b3-traceid": '1dc0a6d0929a2b',
+            "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
+            "uid": uid_token_dict['uid'],
+            "content-type": "application/json",
+            "Accept-Encoding": "gzip,compress,br,deflate",
+            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
+        }
+        data = {
+            "log_params": {
+                "page": "discover_rec",
+                "common": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.2",
+                    "net": "wifi",
+                    "scene": 1089
+                }
+            },
+            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+            "share_width": 625,
+            "share_height": 500,
+            "ext": {
+                "fmid": 0,
+                "items": {}
+            },
+            "app": "xng",
+            "rec_scene": "discover_rec",
+            "log_common_params": {
+                "e": [{
+                    "data": {
+                        "page": "discoverIndexPage",
+                        "topic": "recommend"
+                    },
+                    "ab": {}
+                }],
+                "ext": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.3",
+                    "net": "wifi",
+                    "scene": "1089"
+                },
+                "pj": "1",
+                "pf": "2",
+                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+            },
+            "refresh": False,
+            # "token": cls.play_token,
+            "token": uid_token_dict['token'],
+            # "uid": cls.play_uid,
+            "uid": uid_token_dict['uid'],
+            "proj": "ma",
+            "wx_ver": "8.0.20",
+            "code_ver": "3.62.0"
+        }
+        urllib3.disable_warnings()
+        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+        if "data" not in r.text or r.status_code != 200:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}")
+            return
+        elif "data" not in r.json():
+            Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}")
+            return
+        elif "list" not in r.json()["data"]:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}")
+            return
+        elif len(r.json()["data"]["list"]) == 0:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}")
+            return
+        else:
+            # 视频列表数据
+            feeds = r.json()["data"]["list"]
+            for i in range(len(feeds)):
+                # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
+                xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                    .replace("/", "").replace("\r", "").replace("#", "") \
+                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                    .replace(":", "").replace("*", "").replace("?", "") \
+                    .replace("?", "").replace('"', "").replace("<", "") \
+                    .replace(">", "").replace("|", "").replace(" ", "") \
+                    .replace('"', '').replace("'", '')
+                # 随机取一个表情/符号
+                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
+                # 生成最终标题,标题list[表情+title, title+表情]随机取一个
+                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
+
+                # 视频 ID
+                video_id = feeds[i].get("vid", "")
+                # 播放量
+                play_cnt = feeds[i].get("play_pv", 0)
+                # 点赞量
+                like_cnt = feeds[i].get("favor", {}).get("total", 0)
+                # 评论数
+                comment_cnt = feeds[i].get("comment_count", 0)
+                # 分享量
+                share_cnt = feeds[i].get("share", 0)
+                # 时长
+                duration = int(feeds[i].get("du", 0) / 1000)
+                # 宽和高
+                video_width = int(feeds[i].get("w", 0))
+                video_height = int(feeds[i].get("h", 0))
+                # 发布时间
+                publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                # 用户名 / 头像
+                user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
+                    .replace("/", "").replace("快手", "").replace(" ", "") \
+                    .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                avatar_url = feeds[i].get("user", {}).get("hurl", "")
+                # 用户 ID
+                profile_id = feeds[i]["id"]
+                # 用户 mid
+                profile_mid = feeds[i]["user"]["mid"]
+                # 视频封面
+                cover_url = feeds[i].get("url", "")
+                # 视频播放地址
+                video_url = feeds[i].get("v_url", "")
+
+                video_dict = {
+                    "video_title": video_title,
+                    "video_id": video_id,
+                    "duration": duration,
+                    "play_cnt": play_cnt,
+                    "like_cnt": like_cnt,
+                    "comment_cnt": comment_cnt,
+                    "share_cnt": share_cnt,
+                    "user_name": user_name,
+                    "publish_time_stamp": publish_time_stamp,
+                    "publish_time_str": publish_time_str,
+                    "video_width": video_width,
+                    "video_height": video_height,
+                    "avatar_url": avatar_url,
+                    "profile_id": profile_id,
+                    "profile_mid": profile_mid,
+                    "cover_url": cover_url,
+                    "video_url": video_url,
+                    "session": f"xiaoniangao-play-{int(time.time())}"
+
+                }
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                # 过滤无效视频
+                if video_title == "" or video_id == "" or video_url == "":
+                    Common.logger(log_type, crawler).warning("无效视频\n")
+                # 抓取基础规则过滤
+                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                # 过滤敏感词
+                elif any(str(word) in video_title for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")):
+                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
+                else:
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         video_dict=video_dict,
+                                         rule_dict=rule_dict,
+                                         strategy=strategy,
+                                         oss_endpoint=oss_endpoint,
+                                         env=env)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, strategy, oss_endpoint, env):
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+        # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid="play",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['profile_id']}",
+                                                    "{cls.platform}",
+                                                    "播放量榜爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "播放量榜爬虫策略",
+                   str(video_dict['video_id']),
+                   str(video_dict['video_title']),
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   str(video_dict['publish_time_str']),
+                   str(video_dict['user_name']),
+                   str(video_dict['profile_id']),
+                   str(video_dict['profile_mid']),
+                   str(video_dict['avatar_url']),
+                   str(video_dict['cover_url']),
+                   str(video_dict['video_url'])]]
+        time.sleep(1)
+        Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
+
+
+if __name__ == '__main__':
+    XiaoniangaoPlayScheduling.get_videoList("play", "xiaoniangao", {}, "播放量榜爬虫策略", "out", "dev")  # rule_dict 传空字典,仅用于本地调试
+
+    pass
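
Note: get_uid_token() assembles its ids with random.sample, so no character repeats within a segment (the 32-char token uses 32 distinct picks from a 36-char alphabet). If the upstream API does not require that exact shape, the stdlib has direct equivalents; a sketch, not a drop-in replacement:

import secrets
import uuid

uid = str(uuid.uuid4())        # same 8-4-4-4-12 layout as the handcrafted uid
token = secrets.token_hex(16)  # 32 hex chars, repeats allowed, cryptographically random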

Some files were not shown because too many files changed in this diff