wangkun · 2 years ago
Parent
Current commit
ce6444ad7b

+ 5 - 5
README.MD

@@ -64,7 +64,7 @@ nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py >>./w
 nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py >>./weixinzhishu/nohup_inner_long.log 2>&1 &
 nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_out.py >>./weixinzhishu/nohup_out.log 2>&1 &
 ps aux | grep run_weixinzhishu
-ps aux | grep run_weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
 Device used to obtain wechat_key: Mac Air
 ps aux | grep weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9 && cd /Users/piaoquan/Desktop/piaoquan_crawler && nohup python3 -u weixinzhishu/weixinzhishu_key/search_key_mac.py >> weixinzhishu/nohup.log 2>&1 &
 ```
@@ -104,14 +104,14 @@ ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 #### 小年糕
 ```commandline
 Aliyun 102 server
-定向爬虫策略: sh ./main/main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xiaoniangao/nohup.log
+定向爬虫策略: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup.log
 小时榜爬虫策略: 
 播放量榜爬虫策略: 
 
 Local debugging
-定向爬虫策略: sh ./main/main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local" xiaoniangao/nohup.log
-小时榜爬虫策略: 
-播放量榜爬虫策略:
+定向爬虫策略: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup.log
+小时榜爬虫策略: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup.log
+播放量榜爬虫策略: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup.log
 
 Kill-process command
 ps aux | grep run_xiaoniangao

+ 14 - 4
common/publish.py

@@ -177,23 +177,33 @@ class Publish:
         if env == 'dev':
             uids_dev = [6267140, 6267141]
             return random.choice(uids_dev)
+
+        # Xiaoniangao
         elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '定向爬虫策略':
-            uids_prod_xiaoniangao_follow = [20631196, 20631197, 20631199, 20631200, 20631201, 20631185, 20631186,
-                                            20631187, 20631188, 20631189, 20631190, 20631191, 20631192, 20631193,
-                                            50322036, 50322037, 50322038, 50322039, 50322040, 50322041, 50322173,
-                                            50322175]
+            uids_prod_xiaoniangao_follow = [50322210, 50322211, 50322212, 50322213, 50322214, 50322215,
+                                            50322216, 50322217, 50322218, 50322219, 50322220, 50322221]
             return random.choice(uids_prod_xiaoniangao_follow)
+        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '小时榜爬虫策略':
+            uids_prod_xiaoniangao_hour = [50322226, 50322227, 50322228, 50322229]
+            return random.choice(uids_prod_xiaoniangao_hour)
+        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '播放量榜爬虫策略':
+            uids_prod_xiaoniangao_play = [50322222, 50322223, 50322224, 50322225]
+            return random.choice(uids_prod_xiaoniangao_play)
+
         elif crawler == 'kanyikan':
             uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
                                          20631213, 20631214, 20631215, 20631216, 20631217,
                                          20631223, 20631224, 20631225, 20631226, 20631227]
             return random.choice(uids_prod_kanyikan_moment)
+
         elif crawler == 'ggdc' and env == 'prod' and strategy == 'kanyikan_recommend':
             uids_ggdc_prod_recommend = [26117661, 26117662, 26117663]
             return random.choice(uids_ggdc_prod_recommend)
+
         elif crawler == 'ggdc' and env == 'prod' and strategy == 'follow':
             uids_ggdc_prod_follow = [26117661, 26117662, 26117663]
             return random.choice(uids_ggdc_prod_follow)
+
         else:
             return our_uid
 

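The strategy-to-uid branching above grows one `elif` per pool. A minimal table-driven sketch, with the uid pools copied from the hunk above and the function and dict names hypothetical:

```python
import random

# Hypothetical refactor of the elif chain: map (crawler, env, strategy) to a uid pool.
UID_POOLS = {
    ("xiaoniangao", "prod", "定向爬虫策略"): [50322210, 50322211, 50322212, 50322213,
                                              50322214, 50322215, 50322216, 50322217,
                                              50322218, 50322219, 50322220, 50322221],
    ("xiaoniangao", "prod", "小时榜爬虫策略"): [50322226, 50322227, 50322228, 50322229],
    ("xiaoniangao", "prod", "播放量榜爬虫策略"): [50322222, 50322223, 50322224, 50322225],
}

def choose_uid(crawler, env, strategy, our_uid):
    """Pick a random uid from the matching pool; fall back to our_uid like the else branch."""
    pool = UID_POOLS.get((crawler, env, strategy))
    return random.choice(pool) if pool else our_uid
```

Adding a new strategy then means adding one dictionary entry instead of another branch.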
+ 1 - 1
main/scheduling_main.sh

@@ -17,7 +17,7 @@ elif [ ${env} = "--env=prod" ];then
   piaoquan_crawler_dir=/data5/piaoquan_crawler/
   profile_path=/etc/profile
   python=python
-elif [ ${env} = "--env=local" ];then
+elif [ ${env} = "--env=dev" ];then
   piaoquan_crawler_dir=/Users/wangkun/Desktop/crawler/piaoquan_crawler/
   profile_path=/etc/profile
   node_path=/opt/homebrew/bin/node

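The shell branching on `--env=` amounts to a small env-to-config table. For reference, an equivalent mapping sketched in Python; the values are copied from the hunk above, the dict name is hypothetical, and the prod entry only includes the fields visible in the diff:

```python
# Sketch of scheduling_main.sh's env dispatch as a lookup table (keys assumed).
ENV_CONFIG = {
    "prod": {
        "piaoquan_crawler_dir": "/data5/piaoquan_crawler/",
        "profile_path": "/etc/profile",
        "python": "python",
    },
    "dev": {
        "piaoquan_crawler_dir": "/Users/wangkun/Desktop/crawler/piaoquan_crawler/",
        "profile_path": "/etc/profile",
        "node_path": "/opt/homebrew/bin/node",
    },
}
```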
+ 1 - 0
weixinzhishu/weixinzhishu_key/search_key_mac.py

@@ -70,6 +70,7 @@ class SearchKey:
             Common.logger(log_type, crawler).info("关闭微信成功")
             Common.logger(log_type, crawler).info("关闭微信成功")
 
 
             Common.logger(log_type, crawler).info("关闭微信指数小程序")
             Common.logger(log_type, crawler).info("关闭微信指数小程序")
+            time.sleep(3)
             cmd = "ps aux | grep Program.app | grep -v grep | awk '{print $2}' | xargs kill -9"
             cmd = "ps aux | grep Program.app | grep -v grep | awk '{print $2}' | xargs kill -9"
             os.system(cmd)
             os.system(cmd)
             Common.logger(log_type, crawler).info("微信指数小程序关闭成功")
             Common.logger(log_type, crawler).info("微信指数小程序关闭成功")

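The added `time.sleep(3)` is a fixed delay before the kill. A polling sketch using only the standard library would make the wait adaptive; the "Program.app" pattern comes from the `cmd` above, while the helper name is hypothetical:

```python
import subprocess
import time

def kill_until_gone(pattern: str = "Program.app", attempts: int = 5, interval: float = 1.0) -> bool:
    """Kill processes matching `pattern`, then poll until none remain or attempts run out."""
    for _ in range(attempts):
        # pkill -f matches the full command line, like the ps | grep | awk pipeline above
        subprocess.run(["pkill", "-9", "-f", pattern], capture_output=True)
        time.sleep(interval)
        # pgrep returns non-zero when nothing matches, i.e. the process is gone
        if subprocess.run(["pgrep", "-f", pattern], capture_output=True).returncode != 0:
            return True
    return False
```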
Binary
xiaoniangao/.DS_Store


+ 13 - 15
xiaoniangao/xiaoniangao_follow/xiaoniangao_follow.py

@@ -31,11 +31,11 @@ class XiaoniangaoFollow:
 
     # Filter sensitive words
     @classmethod
-    def filter_words(cls, log_type):
+    def filter_words(cls, log_type, crawler):
         # Sensitive-word list
         word_list = []
         # Read every sensitive word from the cloud doc into the list
-        lists = Feishu.get_values_batch(log_type, "xiaoniangao", "DRAnZh")
+        lists = Feishu.get_values_batch(log_type, crawler, "DRAnZh")
         for i in lists:
             for j in i:
                 # Skip empty cell contents
@@ -100,7 +100,7 @@ class XiaoniangaoFollow:
 
     # Fetch videos from a user's profile page
     @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, p_mid, oss_endpoint, env, machine):
+    def get_videoList(cls, log_type, crawler, strategy, p_mid, oss_endpoint, env):
         try:
             while True:
                 url = "https://api.xiaoniangao.cn/profile/list_album"
@@ -323,26 +323,25 @@ class XiaoniangaoFollow:
                                                  strategy=strategy,
                                                  video_dict=video_dict,
                                                  oss_endpoint=oss_endpoint,
-                                                 env=env,
-                                                 machine=machine)
+                                                 env=env)
         except Exception as error:
             Common.logger(log_type, crawler).error(f"获取个人主页视频异常:{error}\n")
 
     @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+    def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     # Download / upload
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, oss_endpoint, env, machine):
+    def download_publish(cls, log_type, crawler, strategy, video_dict, oss_endpoint, env):
         try:
             if cls.download_rule(video_dict) is False:
                 Common.logger(log_type, crawler).info("不满足基础门槛\n")
-            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                 Common.logger(log_type, crawler).info('视频已下载\n')
-            elif any(str(word) if str(word) in video_dict['video_title'] else False for word in cls.filter_words(log_type)) is True:
+            elif any(str(word) if str(word) in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
                 Common.logger(log_type, crawler).info("视频已中过滤词\n")
                 Common.logger(log_type, crawler).info("视频已中过滤词\n")
             else:
             else:
                 # 下载封面
                 # 下载封面
@@ -406,7 +405,7 @@ class XiaoniangaoFollow:
                                                 {int(video_dict['video_width'])},
                                                 {int(video_dict['video_height'])}) """
                 Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                 Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
 
                 # Write the video to Feishu
@@ -440,7 +439,7 @@ class XiaoniangaoFollow:
 
     # Fetch videos for every followed user
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env):
         try:
             # mids of the followed users
             user_list = cls.get_users(log_type, crawler)
@@ -453,8 +452,7 @@ class XiaoniangaoFollow:
                                   strategy=strategy,
                                   p_mid=profile_mid,
                                   oss_endpoint=oss_endpoint,
-                                  env=env,
-                                  machine=machine)
+                                  env=env)
                 cls.next_t = None
                 time.sleep(1)
         except Exception as e:
@@ -463,5 +461,5 @@ class XiaoniangaoFollow:
 
 if __name__ == "__main__":
     # print(XiaoniangaoFollow.repeat_video("follow", "xiaoniangao", "4919087666", "prod", "aliyun"))
-    print(XiaoniangaoFollow.repeat_video("follow", "xiaoniangao", "4919087666", "dev", "local"))
+    print(XiaoniangaoFollow.repeat_video("follow", "xiaoniangao", "4919087666", "dev"))
     pass

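The `any(str(word) if str(word) in video_dict['video_title'] else False ...)` test above works, but the conditional expression yields the word itself (truthy) rather than a boolean. An equivalent, plainer sketch with a hypothetical helper name:

```python
def title_hits_filter(title: str, filter_words) -> bool:
    """True when any filter word occurs in the title; same effect as the any(...) test."""
    return any(str(word) in title for word in filter_words)
```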
+ 225 - 554
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour.py

@@ -14,20 +14,12 @@ sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
-from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
 proxies = {"http": None, "https": None}
 proxies = {"http": None, "https": None}
 
 
 
 
 class XiaoniangaoHour:
 class XiaoniangaoHour:
     platform = "小年糕"
     platform = "小年糕"
-    # # 配置微信
-    # time.sleep(1)
-    # wechat_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "dzcWHw")
-    # hour_x_b3_traceid = wechat_sheet[2][1]
-    # hour_x_token_id = wechat_sheet[3][1]
-    # hour_referer = wechat_sheet[4][1]
-    # hour_uid = wechat_sheet[5][1]
-    # hour_token = wechat_sheet[6][1]
 
     # Generate uid and token
     @classmethod
@@ -96,30 +88,6 @@ class XiaoniangaoHour:
             return False
         return False
 
-    # 检查是否有今日的上升榜日期
-    @classmethod
-    def check_data(cls, log_type, crawler, date):
-        while True:
-            hour_sheet = Feishu.get_values_batch(log_type, crawler, "ba0da4")
-            if hour_sheet is None:
-                Common.logger(log_type, crawler).warning(f'小时级数据_feeds:{hour_sheet}\n')
-                continue
-            # 判断J1单元格的日期是否为今天
-            if Feishu.get_range_value(log_type, crawler, "ba0da4", "L1:L1")[0] != date:
-                # 插入3列 L1:N1,并写入日期和时间数据
-                values = [[date], ["10:00", "15:00", "20:00"]]
-                time.sleep(1)
-                Feishu.insert_columns(log_type, crawler, "ba0da4", "COLUMNS", 11, 14)
-                time.sleep(1)
-                Feishu.update_values(log_type, crawler, "ba0da4", "L1:N2", values)
-                time.sleep(1)
-                Feishu.merge_cells(log_type, crawler, "ba0da4", "L1:N1")
-                Common.logger(log_type, crawler).info("插入今天日期成功\n")
-                return
-            else:
-                Common.logger(log_type, crawler).info("今日上升榜日期已存在\n")
-                return
-
     # Get emojis and symbols
     @classmethod
     def get_expression(cls):
@@ -137,14 +105,20 @@ class XiaoniangaoHour:
             return expression_list, char_list
 
     @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+    def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
         sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def repeat_hour(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_xiaoniangao_hour where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
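Both `repeat_video` and `repeat_hour` build SQL by interpolating `video_id` into an f-string. Assuming the `MysqlHelper` connection is pymysql-compatible (not confirmed by this diff), a parameterized sketch avoids quoting and injection problems; the helper name and whitelist are hypothetical:

```python
# Sketch only: assumes a pymysql-style connection. Table names are whitelisted
# because SQL placeholders cannot substitute identifiers.
DEDUP_TABLES = ("crawler_video", "crawler_xiaoniangao_hour")

def count_existing(conn, table: str, video_id: str) -> int:
    """Count rows whose out_video_id matches, using %s placeholders."""
    if table not in DEDUP_TABLES:
        raise ValueError(f"unexpected table: {table}")
    sql = f"select count(*) from {table} where platform=%s and out_video_id=%s"
    with conn.cursor() as cursor:
        cursor.execute(sql, ("小年糕", video_id))
        return cursor.fetchone()[0]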
     # Fetch the recommend-feed list
     @classmethod
-    def get_videoList(cls, log_type, crawler, env, machine):
+    def get_videoList(cls, log_type, crawler, env):
         # try:
         uid_token_dict = cls.get_uid_token()
         url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
@@ -306,7 +280,7 @@ class XiaoniangaoHour:
                 else:
                     video_send_time = 0
                 publish_time_stamp = int(int(video_send_time)/1000)
-                publish_time_str = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time_stamp))
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
 
                 # Username / avatar
                 if "user" in feeds[i]:
@@ -367,35 +341,49 @@ class XiaoniangaoHour:
                 # Filter by basic crawl rules
                 elif cls.download_rule(video_dict) is False:
                     Common.logger(log_type, crawler).info("不满足基础门槛规则\n")
-                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                     Common.logger(log_type, crawler).info('视频已下载\n')
                 # Filter sensitive words
                 elif any(str(word) if str(word) in video_title else False for word in cls.filter_words(log_type, crawler)) is True:
                     Common.logger(log_type, crawler).info("视频已中过滤词\n")
                     time.sleep(1)
                 else:
-                    # 写入飞书小时级feeds工作表
-                    Feishu.insert_columns(log_type, crawler, "ba0da4", "ROWS", 2, 3)
-                    get_feeds_time = int(time.time())
-                    values = [[profile_id,
-                               profile_mid,
-                               video_id,
-                               video_title,
-                               user_name,
-                               video_duration,
-                               cover_url,
-                               video_url,
-                               publish_time_str,
-                               str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time))),
-                               video_play_cnt]]
-                    time.sleep(0.5)
-                    Feishu.update_values(log_type, crawler, "ba0da4", "A3:K3", values)
-                    Common.logger(log_type, crawler).info("视频添加至小时级数据_feeds成功\n")
+                    # Write to the hourly-feeds database table (crawler_xiaoniangao_hour)
+                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
+                    profile_mid,
+                    platform,
+                    out_video_id,
+                    video_title,
+                    user_name,
+                    cover_url,
+                    video_url,
+                    duration,
+                    publish_time,
+                    play_cnt,
+                    crawler_time_stamp,
+                    crawler_time)
+                    values({profile_id},
+                    {profile_mid},
+                    "{cls.platform}",
+                    "{video_id}",
+                    "{video_title}",
+                    "{user_name}",
+                    "{cover_url}",
+                    "{video_url}",
+                    {video_duration},
+                    "{publish_time_str}",
+                    {video_play_cnt},
+                    {int(time.time())},
+                    "{time.strftime("%Y-%y-%d %H:%M:%S", time.localtime(int(time.time())))}"
+                    )"""
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
         # except Exception as e:
         #     Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
 
     @classmethod
-    def download_video(cls, log_type, crawler, p_id, p_mid, v_title, v_id, strategy, oss_endpoint, env, machine):
+    def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
         try:
             uid_token_dict = cls.get_uid_token()
             url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
@@ -493,523 +481,206 @@ class XiaoniangaoHour:
                     "video_url": hour_video_url,
                     "video_url": hour_video_url,
                     "session": f"xiaoniangao-hour-{int(time.time())}"
                     "session": f"xiaoniangao-hour-{int(time.time())}"
                 }
                 }
-                # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"], url=video_info_dict["cover_url"])
-                # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"], url=video_info_dict["video_url"])
-                # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
-
-                # 上传视频
-                Common.logger(log_type, crawler).info("开始上传视频...")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          our_uid="hour",
-                                                          env=env,
-                                                          oss_endpoint=oss_endpoint)
-                if env == "dev":
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成")
-
-                if our_video_id is None:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
-                    return
-
-                # 视频信息保存数据库
-                rule_dict = {
-                    "duration": {"min": 40},
-                    "play_cnt": {"min": 4000},
-                    "publish_day": {"min": 10}
-                }
-
-                insert_sql = f""" insert into crawler_video(video_id,
-                                                                out_user_id,
-                                                                platform,
-                                                                strategy,
-                                                                out_video_id,
-                                                                video_title,
-                                                                cover_url,
-                                                                video_url,
-                                                                duration,
-                                                                publish_time,
-                                                                play_cnt,
-                                                                crawler_rule,
-                                                                width,
-                                                                height)
-                                                                values({our_video_id},
-                                                                "{video_info_dict['profile_id']}",
-                                                                "{cls.platform}",
-                                                                "小时榜爬虫策略",
-                                                                "{video_info_dict['video_id']}",
-                                                                "{video_info_dict['video_title']}",
-                                                                "{video_info_dict['cover_url']}",
-                                                                "{video_info_dict['video_url']}",
-                                                                {int(video_info_dict['duration'])},
-                                                                "{video_info_dict['publish_time_str']}",
-                                                                {int(video_info_dict['play_cnt'])},
-                                                                '{json.dumps(rule_dict)}',
-                                                                {int(video_info_dict['video_width'])},
-                                                                {int(video_info_dict['video_height'])}) """
-                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-                # 视频写入飞书
-                Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
-                # 视频ID工作表,首行写入数据
-                upload_time = int(time.time())
-                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                           "小时级上升榜",
-                           str(video_info_dict['video_id']),
-                           str(video_info_dict['video_title']),
-                           our_video_link,
-                           video_info_dict['play_cnt'],
-                           video_info_dict['comment_cnt'],
-                           video_info_dict['like_cnt'],
-                           video_info_dict['share_cnt'],
-                           video_info_dict['duration'],
-                           f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
-                           str(video_info_dict['publish_time_str'].replace("-", "/")),
-                           str(video_info_dict['user_name']),
-                           str(video_info_dict['profile_id']),
-                           str(video_info_dict['profile_mid']),
-                           str(video_info_dict['avatar_url']),
-                           str(video_info_dict['cover_url']),
-                           str(video_info_dict['video_url'])]]
-                time.sleep(1)
-                Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
-                Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
+                return video_info_dict
 
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_video_info:{e}\n")
 
     # Update hourly-ranking data
     @classmethod
-    def update_videoList(cls, log_type, crawler, today, yesterday, before_yesterday):
+    def update_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
         """
         """
         更新小时榜数据
         更新小时榜数据
         """
         """
-        try:
-            update_hour_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "ba0da4")
-            if len(update_hour_sheet) == 2:
-                Common.logger(log_type, crawler).info("当前工作表无数据")
+        before_yesterday = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
+        update_time_stamp = int(time.mktime(time.strptime(before_yesterday, "%Y-%m-%d %H:%M:%S")))
+        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} """
+        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+        if len(update_video_list) == 0:
+            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
+            return
+        for update_video_info in update_video_list:
+            profile_id = update_video_info["profile_id"]
+            profile_mid = update_video_info["profile_mid"]
+            video_title = update_video_info["video_title"]
+            video_id = update_video_info["out_video_id"]
+            if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <= 10:
+                video_info_dict = cls.get_video_info(log_type=log_type,
+                                                     crawler=crawler,
+                                                     p_id=profile_id,
+                                                     p_mid=profile_mid,
+                                                     v_title=video_title,
+                                                     v_id=video_id)
+                ten_play_cnt = video_info_dict['play_cnt']
+                Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
+                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
+                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
+            elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <= 10:
+                video_info_dict = cls.get_video_info(log_type=log_type,
+                                                     crawler=crawler,
+                                                     p_id=profile_id,
+                                                     p_mid=profile_mid,
+                                                     v_title=video_title,
+                                                     v_id=video_id)
+                fifteen_play_cnt = video_info_dict['play_cnt']
+                Common.logger(log_type, crawler).info(f"ten_play_cnt:{fifteen_play_cnt}")
+                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={fifteen_play_cnt} WHERE out_video_id={video_id}; """
+                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
+            elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <= 10:
+                video_info_dict = cls.get_video_info(log_type=log_type,
+                                                     crawler=crawler,
+                                                     p_id=profile_id,
+                                                     p_mid=profile_mid,
+                                                     v_title=video_title,
+                                                     v_id=video_id)
+                twenty_play_cnt = video_info_dict['play_cnt']
+                Common.logger(log_type, crawler).info(f"ten_play_cnt:{twenty_play_cnt}")
+                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={twenty_play_cnt} WHERE out_video_id={video_id}; """
+                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
             else:
-                for i in range(2, len(update_hour_sheet) + 1):
-                    Common.logger(log_type, crawler).info(f"更新第:{i+1}行视频信息")
+                pass
 
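The three snapshot branches differ only in the hour checked and the column written. A table-driven sketch, with the column names taken from the corrected updates above and the helper name hypothetical:

```python
import datetime
from typing import Optional

# Maps a snapshot hour to the play-count column it should fill.
SNAPSHOT_COLUMNS = {10: "ten_play_cnt", 15: "fifteen_play_cnt", 20: "twenty_play_cnt"}

def snapshot_column(now: Optional[datetime.datetime] = None) -> Optional[str]:
    """Return the column to update when within the first 10 minutes of a snapshot hour."""
    now = now or datetime.datetime.now()
    if now.minute <= 10:
        return SNAPSHOT_COLUMNS.get(now.hour)
    return None
```

With that, `update_videoList` would need a single `get_video_info` call and one update per row instead of three near-identical branches.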
-                    # 略过空行
-                    if update_hour_sheet[i][0] is None \
-                            or update_hour_sheet[i][1] is None or update_hour_sheet[i][2] is None:
-                        Common.logger(log_type, crawler).info("空行,略过")
-                    else:
-                        # 视频标题
-                        v_title = update_hour_sheet[i][3]
-                        Common.logger(log_type, crawler).info("video_title:{}", v_title)
-
-                        # 视频 ID
-                        v_id = update_hour_sheet[i][2]
-                        Common.logger(log_type, crawler).info("video_id:{}", v_id)
-
-                        # profile_id,用户 ID
-                        p_id = update_hour_sheet[i][0]
-                        Common.logger(log_type, crawler).info("profile_id:{}", p_id)
-
-                        # profile_mid
-                        p_mid = update_hour_sheet[i][1]
-                        Common.logger(log_type, crawler).info("profile_mid:{}", p_mid)
-
-                        # 抓取时的播放量
-                        v_play_cnt = update_hour_sheet[i][10]
-                        Common.logger(log_type, crawler).info("video_play_cnt:{}", v_play_cnt)
-
-                        # 抓取时间
-                        v_upload_time = update_hour_sheet[i][9]
-                        Common.logger(log_type, crawler).info("video_send_time:{}", v_upload_time)
-                        # 抓取时间的时间戳格式(秒为单位)
-                        v_time = int(time.mktime(time.strptime(v_upload_time, "%Y/%m/%d %H:%M:%S")))
-
-                        # 抓取时间:日期
-                        upload_data = v_upload_time.split(" ")[0]
-                        # 抓取时间:小时
-                        upload_hour = v_upload_time.split(" ")[-1].split(":")[0]
-
-                        uid_token_dict = cls.get_uid_token()
-                        url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
-                        headers = {
-                            # "x-b3-traceid": cls.hour_x_b3_traceid,
-                            "x-b3-traceid": '1c403a4aa72e3c',
-                            # "X-Token-Id": cls.hour_x_token_id,
-                            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
-                            # "uid": cls.hour_uid,
-                            "uid": uid_token_dict['uid'],
-                            "content-type": "application/json",
-                            "Accept-Encoding": "gzip,compress,br,deflate",
-                            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
-                                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
-                                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
-                            # "Referer": cls.hour_referer
-                            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
-                        }
-                        data = {
-                            "play_src": "1",
-                            "profile_id": int(p_id),
-                            "profile_mid": int(p_mid),
-                            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
-                                  "!400x400r/crop/400x400/interlace/1/format/jpg",
-                            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
-                                    "/!80x80r/crop/80x80/interlace/1/format/jpg",
-                            "share_width": 625,
-                            "share_height": 500,
-                            "no_comments": True,
-                            "no_follow": True,
-                            "vid": v_id,
-                            "hot_l1_comment": True,
-                            # "token": cls.hour_token,
-                            # "uid": cls.hour_uid,
-                            "token": uid_token_dict['token'],
-                            "uid": uid_token_dict['uid'],
-                            "proj": "ma",
-                            "wx_ver": "8.0.20",
-                            "code_ver": "3.62.0",
-                            "log_common_params": {
-                                "e": [{
-                                    "data": {
-                                        "page": "dynamicSharePage"
-                                    }
-                                }],
-                                "ext": {
-                                    "brand": "iPhone",
-                                    "device": "iPhone 11",
-                                    "os": "iOS 14.7.1",
-                                    "weixinver": "8.0.20",
-                                    "srcver": "2.24.3",
-                                    "net": "wifi",
-                                    "scene": "1089"
-                                },
-                                "pj": "1",
-                                "pf": "2",
-                                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
-                            }
-                        }
-                        try:
-                            urllib3.disable_warnings()
-                            r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
-                            hour_play_cnt = r.json()["data"]["play_pv"]
-                            Common.logger(log_type, crawler).info("视频详情,当前播放量:{}", hour_play_cnt)
-                            # 固定时间获取符合规则的视频,写入云文档:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=ba0da4
-                            update_hour = datetime.datetime.now()
-                            if int(time.time()) - v_time >= 172800:
-                                Common.logger(log_type, crawler).info("抓取时间超过 2 天\n")
-                                return
-                            elif upload_data == today and update_hour.hour == 10 and int(upload_hour) <= 10:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:10点 and 抓取时间<=10点")
-
-                                # 当天 10:00 视频播放量
-                                ten_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 10:00 视频播放量:{}", ten_hour_play_cnt)
-
-                                # 10:00 的上升榜写入数据
-                                values = int(ten_hour_play_cnt) - int(v_play_cnt)
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "L" + str(i + 1) + ":" + "L" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info(f"10:00数据更新成功:{values}\n")
-
-                            elif upload_data == today and update_hour.hour == 15 and int(upload_hour) <= 10:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:15点 and 抓取时间<=10点")
-
-                                # 当天 15:00 视频播放量
-                                fifteen_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info(f"当天 15:00 视频播放量:{fifteen_hour_play_cnt}")
-
-                                # 当天 10:00 上升的数据
-                                if update_hour_sheet[i][11] is None:
-                                    ten_up_cnt = 0
-                                else:
-                                    ten_up_cnt = update_hour_sheet[i][11]
-
-                                # 15:00 的上升榜写入数据
-                                values = int(fifteen_hour_play_cnt) - (int(v_play_cnt) + int(ten_up_cnt))
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "M" + str(i + 1) + ":" + "M" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("15:00数据更新成功:{}\n", values)
-
-                            elif upload_data == today and update_hour.hour == 15 and 10 < int(upload_hour) <= 15:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:15点 and 10<抓取时间<=15点")
-
-                                # 当天 15:00 视频播放量
-                                fifteen_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 15:00 视频播放量:{}", fifteen_hour_play_cnt)
-
-                                # 15:00 的上升榜写入数据
-                                values = int(fifteen_hour_play_cnt) - int(v_play_cnt)
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "M" + str(i + 1) + ":" + "M" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("15:00数据更新成功:{}\n", values)
-
-                            elif upload_data == today and update_hour.hour == 20 and int(upload_hour) <= 10:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:20点 and 抓取时间<=10点")
-
-                                # 当天 20:00 视频播放量
-                                twenty_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 20:00 视频播放量:{}", twenty_hour_play_cnt)
-
-                                # 当天 10:00 上升的数据
-                                if update_hour_sheet[i][11] is None:
-                                    ten_up_cnt = 0
-                                else:
-                                    ten_up_cnt = update_hour_sheet[i][11]
-
-                                # 当天 15:00 上升的数据
-                                if update_hour_sheet[i][12] is None:
-                                    fifteen_up_cnt = 0
-                                else:
-                                    fifteen_up_cnt = update_hour_sheet[i][12]
-
-                                # 20:00 的上升榜写入数据
-                                values = int(twenty_hour_play_cnt) - (
-                                        int(v_play_cnt) + int(ten_up_cnt) + int(fifteen_up_cnt))
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "N" + str(i + 1) + ":" + "N" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("20:00数据更新成功:{}\n", values)
-
-                            elif upload_data == today and update_hour.hour == 20 and 10 < int(upload_hour) <= 15:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:20点 and 10<抓取时间<=15点")
-
-                                # 当天 20:00 视频播放量
-                                twenty_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 20:00 视频播放量:{}", twenty_hour_play_cnt)
-
-                                # 当天 15:00 上升的数据
-                                if update_hour_sheet[i][12] is None:
-                                    fifteen_up_cnt = 0
-                                else:
-                                    fifteen_up_cnt = update_hour_sheet[i][12]
-
-                                # 20:00 的上升榜写入数据
-                                values = int(twenty_hour_play_cnt) - (int(v_play_cnt) + int(fifteen_up_cnt))
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "N" + str(i + 1) + ":" + "N" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("20:00数据更新成功:{}\n", values)
-
-                            elif upload_data == today and update_hour.hour == 20 and 15 < int(upload_hour) <= 20:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取日期为今天 and 当前时间:20点 and 15<抓取时间<=20点")
-
-                                # 当天 20:00 视频播放量
-                                twenty_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 20:00 视频播放量:{}", twenty_hour_play_cnt)
-
-                                # 20:00 的上升榜写入数据
-                                values = int(twenty_hour_play_cnt) - int(v_play_cnt)
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "N" + str(i + 1) + ":" + "N" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("20:00数据更新成功:{}\n", values)
-
-                            elif (upload_data == yesterday or upload_data == before_yesterday) \
-                                    and update_hour.hour == 10:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取时间小于今天 and 当前时间:10点")
-
-                                # 当天 10:00 视频播放量
-                                ten_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 10:00 视频播放量:{}", ten_hour_play_cnt)
-
-                                # 10:00 的上升榜写入数据
-                                values = int(ten_hour_play_cnt) - int(v_play_cnt)
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "L" + str(i + 1) + ":" + "L" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("10:00数据更新成功:{}\n", values)
-
-                            elif (upload_data == yesterday or upload_data == before_yesterday) \
-                                    and update_hour.hour == 15:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取时间小于今天 and 当前时间:15点")
-
-                                # 当天 15:00 视频播放量
-                                fifteen_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 15:00 视频播放量:{}", fifteen_hour_play_cnt)
-
-                                # 当天 10:00 上升的数据
-                                if update_hour_sheet[i][11] is None:
-                                    ten_up_cnt = 0
-                                else:
-                                    ten_up_cnt = update_hour_sheet[i][11]
-
-                                # 15:00 的上升榜写入数据
-                                values = int(fifteen_hour_play_cnt) - (int(v_play_cnt) + int(ten_up_cnt))
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "M" + str(i + 1) + ":" + "M" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("15:00数据更新成功:{}\n", values)
-
-                            elif (upload_data == yesterday or upload_data == before_yesterday) \
-                                    and update_hour.hour == 20:
-                                Common.logger(log_type, crawler).info("满足条件: 抓取时间小于今天 and 当前时间:20点")
-
-                                # 当天 20:00 视频播放量
-                                twenty_hour_play_cnt = hour_play_cnt
-                                Common.logger(log_type, crawler).info("当天 20:00 视频播放量:{}", twenty_hour_play_cnt)
-
-                                # 当天 10:00 上升的数据
-                                if update_hour_sheet[i][11] is None:
-                                    ten_up_cnt = 0
-                                else:
-                                    ten_up_cnt = update_hour_sheet[i][11]
-
-                                # 当天 15:00 上升的数据
-                                if update_hour_sheet[i][12] is None:
-                                    fifteen_up_cnt = 0
-                                else:
-                                    fifteen_up_cnt = update_hour_sheet[i][12]
-
-                                # 20:00 的上升榜写入数据
-                                values = int(twenty_hour_play_cnt) - (
-                                        int(v_play_cnt) + int(ten_up_cnt) + int(fifteen_up_cnt))
-                                time.sleep(1)
-                                Feishu.update_values(
-                                    log_type, "xiaoniangao", "ba0da4",
-                                    "N" + str(i + 1) + ":" + "N" + str(i + 1), [[values]])
-                                Common.logger(log_type, crawler).info("20:00数据更新成功:{}\n", values)
-
-                        except Exception as e:
-                            Common.logger(log_type, crawler).error("视频详情:{},异常:{}\n", v_title, e)
-        except Exception as e:
-            Common.logger(log_type, crawler).error("获取小时榜数据异常:{}\n", e)
-
-    # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        """
-        2.从云文档中下载符合规则的视频:https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=ba0da4
-            2.1 当日 10:00 or 15:00 or 20:00 视频播放量上升 > 5000
-            2.2 当日 10:00 and 15:00 视频播放量上升 > 2000
-            2.3 当日 15:00 and 20:00 视频播放量上升 > 2000
-            2.4 昨日 20:00 and 今日 10:00 视频播放量上升 > 2000
-        """
-        while True:
-            try:
-                hour_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "ba0da4")
-                if hour_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"小时级数据_feeds:{hour_sheet}\n")
-                    continue
-                if len(hour_sheet) == 2:
-                    Common.logger(log_type, crawler).info("小时级数据_feeds,没有数据\n")
-                    return
-                for i in range(2, len(hour_sheet)):
-                    Common.logger(log_type, crawler).info(f"分析第:{i+1}行视频信息是否符合下载规则")
-
-                    # 略过空行
-                    if hour_sheet[i][0] is None  or hour_sheet[i][1] is None or hour_sheet[i][2] is None:
-                        Common.logger(log_type, crawler).info("空行,略过")
-                        continue
-
-                    # 视频标题
-                    v_title = hour_sheet[i][3]
-
-                    # 视频 ID
-                    v_id = hour_sheet[i][2]
-
-                    # profile_id,用户 ID
-                    p_id = hour_sheet[i][0]
-
-                    # profile_mid
-                    p_mid = hour_sheet[i][1]
-
-                    # 抓取时间
-                    v_upload_time = hour_sheet[i][9]
-                    v_send_time = int(time.mktime(time.strptime(v_upload_time, "%Y/%m/%d %H:%M:%S")))
-
-                    # 播放量
-                    v_play_cnt = hour_sheet[i][10]
-
-                    # 今日 10:00 数据上升量
-                    if hour_sheet[i][11] is None:
-                        ten_cnt = 0
-                    else:
-                        ten_cnt = hour_sheet[i][11]
-
-                    # 今日 15:00 数据上升量
-                    if hour_sheet[i][12] is None:
-                        fifteen_cnt = 0
-                    else:
-                        fifteen_cnt = hour_sheet[i][12]
+    def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"], url=video_info_dict["cover_url"])
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"], url=video_info_dict["video_url"])
+        # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid="hour",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
+            return
+
+        # 视频信息保存数据库
+        rule_dict = {
+            "duration": {"min": 40},
+            "play_cnt": {"min": 4000},
+            "publish_day": {"min": 10}
+        }
 
 
-                    if hour_sheet[i][13] is None:
-                        twenty_cnt = 0
-                    else:
-                        twenty_cnt = hour_sheet[i][13]
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        "{video_info_dict['profile_id']}",
+                                                        "{cls.platform}",
+                                                        "小时榜爬虫策略",
+                                                        "{video_info_dict['video_id']}",
+                                                        "{video_info_dict['video_title']}",
+                                                        "{video_info_dict['cover_url']}",
+                                                        "{video_info_dict['video_url']}",
+                                                        {int(video_info_dict['duration'])},
+                                                        "{video_info_dict['publish_time_str']}",
+                                                        {int(video_info_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_info_dict['video_width'])},
+                                                        {int(video_info_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # Write the video to Feishu
+        Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
+        # Video-ID sheet: write data into the first row
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "小时级上升榜",
+                   str(video_info_dict['video_id']),
+                   str(video_info_dict['video_title']),
+                   our_video_link,
+                   video_info_dict['play_cnt'],
+                   video_info_dict['comment_cnt'],
+                   video_info_dict['like_cnt'],
+                   video_info_dict['share_cnt'],
+                   video_info_dict['duration'],
+                   f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
+                   str(video_info_dict['publish_time_str'].replace("-", "/")),
+                   str(video_info_dict['user_name']),
+                   str(video_info_dict['profile_id']),
+                   str(video_info_dict['profile_mid']),
+                   str(video_info_dict['avatar_url']),
+                   str(video_info_dict['cover_url']),
+                   str(video_info_dict['video_url'])]]
+        time.sleep(1)
+        Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
 
 
-                    # 昨日 20:00 数据上升量
-                    if hour_sheet[i][16] is None:
-                        yesterday_twenty_cnt = 0
-                    else:
-                        yesterday_twenty_cnt = hour_sheet[i][16]
-
-                    Common.logger(log_type, crawler).info(f"视频标题:{v_title}")
-                    Common.logger(log_type, crawler).info(f"10:00 / 15:00 / 20:00 上升量: {ten_cnt} / {fifteen_cnt} / {twenty_cnt}")
-
-                    if int(time.time()) - int(v_send_time) >= 3600*24*3:
-                        Common.logger(log_type, crawler).info("抓取时间超过 3 天")
-                        return
-                    elif cls.repeat_video(log_type, crawler, v_id, env, machine) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-
-                    # 播放量大于 50000,直接下载
-                    elif int(v_play_cnt) >= 50000:
-                        Common.logger(log_type, crawler).info(f"播放量:{v_play_cnt} >= 50000,满足下载规则,开始下载视频")
-                        cls.download_video(log_type=log_type, crawler=crawler, p_id=p_id, p_mid=p_mid, v_title=v_title, v_id=v_id,
-                                           strategy=strategy, oss_endpoint=oss_endpoint, env=env, machine=machine)
-
-                    # 上升榜判断逻辑,任意时间段上升量>=5000,连续两个时间段上升量>=2000
-                    elif int(ten_cnt) >= 5000 or int(fifteen_cnt) >= 5000 or int(twenty_cnt) >= 5000:
-                        Common.logger(log_type, crawler).info(f"10:00 or 15:00 or 20:00 数据上升量:{ten_cnt} or {fifteen_cnt} or {twenty_cnt} >= 5000")
-                        Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-                        cls.download_video(log_type=log_type, crawler=crawler, p_id=p_id, p_mid=p_mid, v_title=v_title, v_id=v_id,
-                                           strategy=strategy, oss_endpoint=oss_endpoint, env=env, machine=machine)
-
-                    elif int(ten_cnt) >= 2000 and int(fifteen_cnt) >= 2000:
-                        Common.logger(log_type, crawler).info(f"10:00 and 15:00 数据上升量:{ten_cnt} and {fifteen_cnt} >= 2000")
-                        Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-                        cls.download_video(log_type=log_type, crawler=crawler, p_id=p_id, p_mid=p_mid, v_title=v_title, v_id=v_id,
-                                           strategy=strategy, oss_endpoint=oss_endpoint, env=env, machine=machine)
-
-                    elif int(fifteen_cnt) >= 2000 and int(twenty_cnt) >= 2000:
-                        Common.logger(log_type, crawler).info(f"15:00 and 20:00 数据上升量:{fifteen_cnt} and {twenty_cnt} >= 2000")
-                        Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-                        cls.download_video(log_type=log_type, crawler=crawler, p_id=p_id, p_mid=p_mid, v_title=v_title, v_id=v_id,
-                                           strategy=strategy, oss_endpoint=oss_endpoint, env=env, machine=machine)
-
-                    elif int(yesterday_twenty_cnt) >= 2000 and int(ten_cnt) >= 2000:
-                        Common.logger(log_type, crawler).info(f"昨日20:00 and 今日10:00 数据上升量:{yesterday_twenty_cnt} and {ten_cnt} >= 2000")
-                        Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
-                        cls.download_video(log_type=log_type, crawler=crawler, p_id=p_id, p_mid=p_mid, v_title=v_title, v_id=v_id,
-                                           strategy=strategy, oss_endpoint=oss_endpoint, env=env, machine=machine)
+    # Download / publish
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env):
+        # try:
+        if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        # Play count >= 50000: download directly
+        elif int(video_info_dict["play_cnt"]) >= 50000:
+            Common.logger(log_type, crawler).info(f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        # Rising-list rule: any single window gains >= 5000, or two windows each gain >= 2000
+        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
+            Common.logger(log_type, crawler).info(f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
 
 
-                    else:
-                        Common.logger(log_type, crawler).info("上升量不满足下载规则")
-            except Exception as e:
-                Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+        else:
+            Common.logger(log_type, crawler).info("上升量不满足下载规则")
+    # except Exception as e:
+    #     Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
 
 
 
 
if __name__ == "__main__":
    # print(XiaoniangaoHour.filter_words("hour", "xiaoniangao"))
    # print(XiaoniangaoHour.get_uid_token())
+    # XiaoniangaoHour.get_videoList("test", "xiaoniangao", "dev")
+    XiaoniangaoHour.update_videoList("test", "xiaoniangao", "小时榜爬虫策略", "out", "dev")
 
 
    pass

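The rising-list branches in `download_publish` above reduce to three conditions: an absolute play-count threshold, a single-window spike, or a pair of windows climbing together. A minimal sketch of the same decision as a pure function (`should_download` and its arguments are hypothetical names, not part of the repo):

```python
# Minimal sketch of the rising-list decision above (hypothetical helper, not in the repo).
def should_download(play_cnt, ten, fifteen, twenty):
    """Mirror of download_publish: absolute play count, one spiking window, or two climbing windows."""
    if play_cnt >= 50000:                      # absolute threshold: download directly
        return True
    if max(ten, fifteen, twenty) >= 5000:      # any single window gains >= 5000
        return True
    pairs = [(ten, fifteen), (fifteen, twenty), (ten, twenty)]
    return any(a >= 2000 and b >= 2000 for a, b in pairs)  # two windows each gain >= 2000


assert should_download(60000, 0, 0, 0)         # big play count alone is enough
assert should_download(100, 5200, 0, 0)        # one spiking window
assert should_download(100, 2100, 0, 2500)     # 10:00 + 20:00 pair
assert not should_download(100, 1999, 1999, 1999)
```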
+ 8 - 12
xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py

@@ -10,36 +10,32 @@ from common.common import Common
from xiaoniangao.xiaoniangao_follow.xiaoniangao_follow import XiaoniangaoFollow
 
 
 
 
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+def main(log_type, crawler, env):
    while True:
        try:
+            if env == "dev":
+                oss_endpoint = "out"
+            else:
+                oss_endpoint = "inner"
            Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
            XiaoniangaoFollow.get_follow_videos(log_type=log_type,
                                                crawler=crawler,
-                                                strategy=strategy,
+                                                strategy="定向爬虫策略",
                                                oss_endpoint=oss_endpoint,
-                                                env=env,
-                                                machine=machine)
+                                                env=env)
            Common.del_logs(log_type, crawler)
            Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
            time.sleep(60)
        except Exception as e:
            Common.logger(log_type, crawler).info(f"小年糕定向抓取异常:{e}\n")
-            # Feishu.bot(log_type, crawler, f"{e}")
 
 
 
 
if __name__ == "__main__":
    parser = argparse.ArgumentParser()  ## create the argument parser object
    parser.add_argument('--log_type', type=str)  ## add the argument and declare its type
    parser.add_argument('--crawler')  ## add the argument
-    parser.add_argument('--strategy')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
    parser.add_argument('--env')  ## add the argument
-    parser.add_argument('--machine')  ## 添加参数
    args = parser.parse_args()  ### assign the args; they can also be passed from the terminal
    main(log_type=args.log_type,
         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)
+         env=args.env)
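
The `env` → `oss_endpoint` mapping ("dev" uploads through the public endpoint, everything else through the inner one) is now repeated inside each runner; a shared helper would keep them in sync. A sketch (`resolve_oss_endpoint` is a hypothetical name, not in the repo):

```python
def resolve_oss_endpoint(env: str) -> str:
    """dev uploads via the public ("out") OSS endpoint; everything else uses "inner"."""
    return "out" if env == "dev" else "inner"


assert resolve_oss_endpoint("dev") == "out"
assert resolve_oss_endpoint("prod") == "inner"
```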

+ 33 - 45
xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py

@@ -5,64 +5,52 @@ import argparse
import datetime
import os
import sys
-import time
sys.path.append(os.getcwd())
from common.common import Common
from xiaoniangao.xiaoniangao_hour.xiaoniangao_hour import XiaoniangaoHour
 
 
 
 
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+def main(log_type, crawler, env):
    while True:
-        # 今天的日期:年-月-日
-        today = datetime.datetime.now().strftime("%Y/%m/%d")
-        # 昨天
-        yesterday = (datetime.date.today() + datetime.timedelta(days=-1)).strftime("%Y/%m/%d")
-        # 前天
-        before_yesterday = (datetime.date.today() + datetime.timedelta(days=-2)).strftime("%Y/%m/%d")
-
-        Common.logger(log_type, crawler).info("检查今日上升榜日期是否存在")
-        XiaoniangaoHour.check_data(log_type, crawler, today)
-
-        while True:
-            # 获取符合规则的视频,写入小时级数据_feeds
-            XiaoniangaoHour.get_videoList(log_type, crawler, env, machine)
-
-            now = datetime.datetime.now()
-            if now.hour == 10 and 0 <= now.minute <= 10:
-                Common.logger(log_type, crawler).info("开始更新上升榜")
-                XiaoniangaoHour.update_videoList(log_type, crawler, today, yesterday, before_yesterday)
-                Common.logger(log_type, crawler).info("开始下载上升榜")
-                XiaoniangaoHour.download_publish(log_type, crawler, strategy, oss_endpoint, env, machine)
-
-            elif now.hour == 15 and now.minute <= 10:
-                Common.logger(log_type, crawler).info("开始更新上升榜")
-                XiaoniangaoHour.update_videoList(log_type, crawler, today, yesterday, before_yesterday)
-                Common.logger(log_type, crawler).info("开始下载上升榜")
-                XiaoniangaoHour.download_publish(log_type, crawler, strategy, oss_endpoint, env, machine)
-
-            elif now.hour == 20 and now.minute <= 10:
-                Common.logger(log_type, crawler).info("开始更新上升榜")
-                XiaoniangaoHour.update_videoList(log_type, crawler, today, yesterday, before_yesterday)
-                Common.logger(log_type, crawler).info("开始下载上升榜")
-                XiaoniangaoHour.download_publish(log_type, crawler, strategy, oss_endpoint, env, machine)
-
-            elif 1 >= now.hour >= 0:
-                time.sleep(3600)
-                break
+        if env == "dev":
+            oss_endpoint = "out"
+        else:
+            oss_endpoint = "inner"
+        # Fetch videos that meet the rules and write them to the "小时级数据_feeds" sheet
+        XiaoniangaoHour.get_videoList(log_type, crawler, env)
+        now = datetime.datetime.now()
+        # At 10:00 / 15:00 / 20:00, within the first 10 minutes, update & download the rising list
+        if now.hour in (10, 15, 20) and now.minute <= 10:
+            Common.logger(log_type, crawler).info("开始更新/下载上升榜")
+            XiaoniangaoHour.update_videoList(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy="小时榜爬虫策略",
+                                             oss_endpoint=oss_endpoint,
+                                             env=env)
+        Common.del_logs(log_type, crawler)
 
 
 
 
if __name__ == "__main__":
    parser = argparse.ArgumentParser()  ## create the argument parser object
    parser.add_argument('--log_type', type=str)  ## add the argument and declare its type
    parser.add_argument('--crawler')  ## add the argument
-    parser.add_argument('--strategy')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
    parser.add_argument('--env')  ## add the argument
-    parser.add_argument('--machine')  ## 添加参数
    args = parser.parse_args()  ### assign the args; they can also be passed from the terminal
    main(log_type=args.log_type,
         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)
+         env=args.env)
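
The runner only fires `update_videoList` during the first 10 minutes after 10:00, 15:00, and 20:00. The gate is easy to unit-test once pulled out (`in_update_window` is a hypothetical helper, not in the repo):

```python
import datetime

UPDATE_HOURS = (10, 15, 20)


def in_update_window(now: datetime.datetime) -> bool:
    """True within the first 10 minutes of each update hour."""
    return now.hour in UPDATE_HOURS and now.minute <= 10


assert in_update_window(datetime.datetime(2023, 3, 16, 15, 5))
assert not in_update_window(datetime.datetime(2023, 3, 16, 15, 11))
assert not in_update_window(datetime.datetime(2023, 3, 16, 14, 5))
```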

+ 39 - 0
xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py

@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/16
+import argparse
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from xiaoniangao.xiaoniangao_play.xiaoniangao_play import XiaoniangaoPlay
+
+
+class Main:
+    @classmethod
+    def main(cls, log_type, crawler, env):
+        while True:
+            if env == "dev":
+                oss_endpoint = "out"
+            else:
+                oss_endpoint = "inner"
+            for i in range(50):
+                Common.logger(log_type, crawler).info(f'正在抓取小年糕播放量榜,第{i+1}页\n')
+                XiaoniangaoPlay.get_videoList(log_type=log_type,
+                                              crawler=crawler,
+                                              strategy="播放量榜爬虫策略",
+                                              oss_endpoint=oss_endpoint,
+                                              env=env)
+            Common.del_logs(log_type, crawler)
+            Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+            time.sleep(60)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()  ## create the argument parser object
+    parser.add_argument('--log_type', type=str)  ## add the argument and declare its type
+    parser.add_argument('--crawler')  ## add the argument
+    parser.add_argument('--env')  ## add the argument
+    args = parser.parse_args()  ### assign the args; they can also be passed from the terminal
+    Main.main(log_type=args.log_type, crawler=args.crawler, env=args.env)

+ 3 - 0
xiaoniangao/xiaoniangao_play/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/16

+ 455 - 0
xiaoniangao/xiaoniangao_play/xiaoniangao_play.py

@@ -0,0 +1,455 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/16
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+import urllib3
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+proxies = {"http": None, "https": None}
+
+
+class XiaoniangaoPlay:
+    platform = "小年糕"
+
+    # Generate uid and token
+    @classmethod
+    def get_uid_token(cls):
+        words = "abcdefghijklmnopqrstuvwxyz0123456789"
+        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+        token = "".join(random.sample(words, 32))
+        uid_token_dict = {
+            "uid": uid,
+            "token": token
+        }
+        return uid_token_dict
+
+    # 过滤敏感词
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                # Sensitive-word list
+                word_list = []
+                # Read all sensitive words from the cloud doc and append them to the list
+                filter_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "DRAnZh")
+                if filter_sheet is None:
+                    Common.logger(log_type, crawler).info(f"filter_sheet:{filter_sheet}")
+                    continue
+                for i in filter_sheet:
+                    for j in i:
+                        # Skip empty cells
+                        if j is None:
+                            pass
+                        else:
+                            word_list.append(j)
+                return word_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"filter_words:{e}\n")
+
+    # Basic download thresholds
+    @classmethod
+    def download_rule(cls, video_dict):
+        """
+        Basic rules a video must meet before it is downloaded
+        :param video_dict: video info, dict
+        :return: True if every rule passes, otherwise False
+        """
+        # Duration >= 40s, play count >= 80000, published within the last 60 days;
+        # the width/height/like/share floors are 0 and always pass for non-negative values
+        return (int(float(video_dict['duration'])) >= 40
+                and (int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0)
+                and int(video_dict['play_cnt']) >= 80000
+                and int(video_dict['like_cnt']) >= 0
+                and int(video_dict['share_cnt']) >= 0
+                and int(time.time()) - int(video_dict['publish_time_stamp']) <= 3600 * 24 * 60)
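
Only the duration, play-count, and publish-age checks can actually reject a video, since the other floors are 0. A quick sanity check, assuming the class above is importable (the sample dict is hypothetical data):

```python
import time

sample = {
    "duration": "45.0", "video_width": 720, "video_height": 1280,
    "play_cnt": 90000, "like_cnt": 0, "share_cnt": 0,
    "publish_time_stamp": int(time.time()) - 3600 * 24 * 30,  # 30 days old
}
assert XiaoniangaoPlay.download_rule(sample) is True

sample["play_cnt"] = 79999  # below the 80000 floor
assert XiaoniangaoPlay.download_rule(sample) is False
```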
+
+    # Fetch emoji expressions and symbols
+    @classmethod
+    def get_expression(cls):
+        while True:
+            expression_list = []
+            char_list = []
+            char_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "BhlbST")
+            if char_sheet is None:
+                continue
+            for i in range(len(char_sheet)):
+                if char_sheet[i][0] is not None:
+                    expression_list.append(char_sheet[i][0])
+                if char_sheet[i][1] is not None:
+                    char_list.append(char_sheet[i][1])
+            return expression_list, char_list
+
+    # Fetch the video feed list
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
+        uid_token_dict = cls.get_uid_token()
+        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+        headers = {
+            # "x-b3-traceid": cls.play_x_b3_traceid,
+            "x-b3-traceid": '1dc0a6d0929a2b',
+            # "X-Token-Id": cls.play_x_token_id,
+            "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
+            # "uid": cls.play_uid,
+            "uid": uid_token_dict['uid'],
+            "content-type": "application/json",
+            "Accept-Encoding": "gzip,compress,br,deflate",
+            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+            # "Referer": cls.play_referer
+            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
+        }
+        data = {
+            "log_params": {
+                "page": "discover_rec",
+                "common": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.2",
+                    "net": "wifi",
+                    "scene": 1089
+                }
+            },
+            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+            "share_width": 625,
+            "share_height": 500,
+            "ext": {
+                "fmid": 0,
+                "items": {}
+            },
+            "app": "xng",
+            "rec_scene": "discover_rec",
+            "log_common_params": {
+                "e": [{
+                    "data": {
+                        "page": "discoverIndexPage",
+                        "topic": "recommend"
+                    },
+                    "ab": {}
+                }],
+                "ext": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.3",
+                    "net": "wifi",
+                    "scene": "1089"
+                },
+                "pj": "1",
+                "pf": "2",
+                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+            },
+            "refresh": False,
+            # "token": cls.play_token,
+            "token": uid_token_dict['token'],
+            # "uid": cls.play_uid,
+            "uid": uid_token_dict['uid'],
+            "proj": "ma",
+            "wx_ver": "8.0.20",
+            "code_ver": "3.62.0"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+            if "data" not in r.text or r.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}")
+            elif "data" not in r.json():
+                Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}")
+            elif "list" not in r.json()["data"]:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}")
+            else:
+                # Video list data
+                feeds = r.json()["data"]["list"]
+                for i in range(len(feeds)):
+                    # Title: an emoji is randomly added at the head/tail, or a symbol replaces mid-sentence punctuation
+                    if "title" in feeds[i]:
+                        befor_video_title = feeds[i]["title"].strip().replace("\n", "") \
+                            .replace("/", "").replace("\r", "").replace("#", "") \
+                            .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "").replace("#表情", "").replace("#符号", "")
+
+                        expression = cls.get_expression()
+                        expression_list = expression[0]
+                        char_list = expression[1]
+                        # Randomly pick one emoji
+                        expression = random.choice(expression_list)
+                        # Build a title list: [emoji+title, title+emoji]
+                        expression_title_list = [expression + befor_video_title, befor_video_title + expression]
+                        # Randomly pick one title from the list
+                        title_list1 = random.choice(expression_title_list)
+                        # Build a title: original title + symbol
+                        title_list2 = befor_video_title + random.choice(char_list)
+                        # Pool the emoji+title and title+symbol variants as candidate titles
+                        title_list4 = [title_list2, title_list1]
+                        # Final title
+                        video_title = random.choice(title_list4)
+                    else:
+                        video_title = 0
+
+                    # Video ID
+                    if "vid" in feeds[i]:
+                        video_id = feeds[i]["vid"]
+                    else:
+                        video_id = 0
+
+                    # Play count
+                    if "play_pv" in feeds[i]:
+                        video_play_cnt = feeds[i]["play_pv"]
+                    else:
+                        video_play_cnt = 0
+
+                    # Comment count
+                    if "comment_count" in feeds[i]:
+                        video_comment_cnt = feeds[i]["comment_count"]
+                    else:
+                        video_comment_cnt = 0
+
+                    # Like count
+                    if "favor" in feeds[i]:
+                        video_like_cnt = feeds[i]["favor"]["total"]
+                    else:
+                        video_like_cnt = 0
+
+                    # Share count
+                    if "share" in feeds[i]:
+                        video_share_cnt = feeds[i]["share"]
+                    else:
+                        video_share_cnt = 0
+
+                    # Duration
+                    if "du" in feeds[i]:
+                        video_duration = int(feeds[i]["du"] / 1000)
+                    else:
+                        video_duration = 0
+
+                    # Width and height ("w" or "h" was always truthy, so a missing "w" raised KeyError)
+                    if "w" in feeds[i] and "h" in feeds[i]:
+                        video_width = feeds[i]["w"]
+                        video_height = feeds[i]["h"]
+                    else:
+                        video_width = 0
+                        video_height = 0
+
+                    # Publish time
+                    if "t" in feeds[i]:
+                        video_send_time = feeds[i]["t"]
+                    else:
+                        video_send_time = 0
+                    publish_time_stamp = int(int(video_send_time)/1000)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                    # User name / avatar
+                    if "user" in feeds[i]:
+                        user_name = feeds[i]["user"]["nick"].strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                        head_url = feeds[i]["user"]["hurl"]
+                    else:
+                        user_name = 0
+                        head_url = 0
+
+                    # User ID
+                    profile_id = feeds[i]["id"]
+
+                    # User mid
+                    profile_mid = feeds[i]["user"]["mid"]
+
+                    # Video cover
+                    if "url" in feeds[i]:
+                        cover_url = feeds[i]["url"]
+                    else:
+                        cover_url = 0
+
+                    # Video playback URL
+                    if "v_url" in feeds[i]:
+                        video_url = feeds[i]["v_url"]
+                    else:
+                        video_url = 0
+
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": video_id,
+                        "duration": video_duration,
+                        "play_cnt": video_play_cnt,
+                        "like_cnt": video_like_cnt,
+                        "comment_cnt": video_comment_cnt,
+                        "share_cnt": video_share_cnt,
+                        "user_name": user_name,
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "video_width": video_width,
+                        "video_height": video_height,
+                        "avatar_url": head_url,
+                        "profile_id": profile_id,
+                        "profile_mid": profile_mid,
+                        "cover_url": cover_url,
+                        "video_url": video_url,
+                        "session": f"xiaoniangao-play-{int(time.time())}"
+
+                    }
+
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         video_dict=video_dict,
+                                         strategy=strategy,
+                                         oss_endpoint=oss_endpoint,
+                                         env=env)
+
+        except Exception as e:
+            Common.logger(log_type, crawler).error("get_play_feeds异常:{}", e)
+
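Every field above is extracted with an if/else pair; `dict.get` with a default collapses the pattern without changing the fallbacks. A sketch (`parse_feed_item` is a hypothetical helper, not in the repo):

```python
def parse_feed_item(item: dict) -> dict:
    """Same defaults as the if/else chain above: missing fields fall back to 0."""
    user = item.get("user", {})
    return {
        "video_id": item.get("vid", 0),
        "play_cnt": item.get("play_pv", 0),
        "comment_cnt": item.get("comment_count", 0),
        "like_cnt": item.get("favor", {}).get("total", 0),
        "share_cnt": item.get("share", 0),
        "duration": int(item.get("du", 0) / 1000),
        "video_width": item.get("w", 0),
        "video_height": item.get("h", 0),
        "user_name": user.get("nick", 0),
        "avatar_url": user.get("hurl", 0),
    }
```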
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
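`repeat_video` and the insert statement below interpolate values straight into SQL f-strings, so a quote in a title or ID can break the statement. If the helper exposes a DB-API cursor (an assumption; `MysqlHelper`'s internals aren't shown here), placeholders sidestep that:

```python
def video_exists(cursor, platform: str, out_video_id: str) -> bool:
    """Parameterized version of repeat_video's lookup (pymysql-style %s placeholders)."""
    sql = "select 1 from crawler_video where platform = %s and out_video_id = %s limit 1;"
    cursor.execute(sql, (platform, out_video_id))  # the driver escapes the values
    return cursor.fetchone() is not None
```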
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, strategy, oss_endpoint, env):
+        # try:
+        # Filter out invalid videos
+        if video_dict["video_id"] == 0 \
+                or video_dict["video_url"] == 0\
+                or video_dict["cover_url"] == 0:
+            Common.logger(log_type, crawler).warning("无效视频\n")
+        # Download-rule check
+        elif cls.download_rule(video_dict) is False:
+            Common.logger(log_type, crawler).info("不满足抓取规则")
+        # Deduplicate
+        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+            Common.logger(log_type, crawler).info("视频已下载\n")
+        # Filter-word check
+        elif any(str(word) in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+            Common.logger(log_type, crawler).info("视频已中过滤词\n")
+        else:
+            # Download the cover image
+            Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+            # Download the video file
+            Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+            # Save video info to "./videos/{download_video_title}/info.txt"
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # Upload the video
+            Common.logger(log_type, crawler).info("开始上传视频...")
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="play",
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            if env == "dev":
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            else:
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            Common.logger(log_type, crawler).info("视频上传完成")
+
+            if our_video_id is None:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                return
+
+            # Save video info to the database
+            rule_dict = {
+                "duration": {"min": 40},
+                "play_cnt": {"min": 80000},
+                "min_publish_day": {"min": 60}
+            }
+
+            insert_sql = f""" insert into crawler_video(video_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        "{video_dict['profile_id']}",
+                                                        "{cls.platform}",
+                                                        "播放量榜爬虫策略",
+                                                        "{video_dict['video_id']}",
+                                                        "{video_dict['video_title']}",
+                                                        "{video_dict['cover_url']}",
+                                                        "{video_dict['video_url']}",
+                                                        {int(video_dict['duration'])},
+                                                        "{video_dict['publish_time_str']}",
+                                                        {int(video_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_dict['video_width'])},
+                                                        {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+            Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+            # Write the video to Feishu
+            Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
+            # Video-ID sheet: write data into the first row
+            upload_time = int(time.time())
+            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "播放量榜爬虫策略",
+                       str(video_dict['video_id']),
+                       str(video_dict['video_title']),
+                       our_video_link,
+                       video_dict['play_cnt'],
+                       video_dict['comment_cnt'],
+                       video_dict['like_cnt'],
+                       video_dict['share_cnt'],
+                       video_dict['duration'],
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       str(video_dict['publish_time_str']),
+                       str(video_dict['user_name']),
+                       str(video_dict['profile_id']),
+                       str(video_dict['profile_mid']),
+                       str(video_dict['avatar_url']),
+                       str(video_dict['cover_url']),
+                       str(video_dict['video_url'])]]
+            time.sleep(1)
+            Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
+            Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
+
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error('download_publish异常:{}', e)
+
+
+if __name__ == '__main__':
+    XiaoniangaoPlay.get_videoList("play", "xiaoniangao", "播放量榜爬虫策略", "out", "dev")
+
+    pass
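
The title-decoration branch in `get_videoList` nests several random choices. A behavior-equivalent sketch as a pure function (`decorate_title` is a hypothetical name, not in the repo), keeping the original 50/25/25 weighting between the symbol and emoji variants:

```python
import random


def decorate_title(title: str, expressions: list, chars: list) -> str:
    """Same three outcomes as above: 50% title+symbol, 25% emoji+title, 25% title+emoji."""
    emoji = random.choice(expressions)
    with_emoji = random.choice([emoji + title, title + emoji])  # inner 50/50 pick
    return random.choice([title + random.choice(chars), with_emoji])  # outer 50/50 pick
```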