add suisuiniannianyingfuqi

wangkun 2 years ago
parent
commit
fd2ca48989

+ 11 - 0
README.MD

@@ -183,4 +183,15 @@ sh ./main/scheduling_main.sh ./benshanzhufu/benshanzhufu_main/run_benshanzhufu_r
 Check the process
 ps aux | grep run_benshanzhufu
 ps aux | grep run_benshanzhufu | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+#### 岁岁年年迎福气小程序
+```commandline
+Alibaba Cloud server 102
+/usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py --log_type="recommend" --crawler="suisuiniannianyingfuqi" --env="prod"  ./suisuiniannianyingfuqi/logs/nohup-recommend.log
+Local debugging
+sh ./main/scheduling_main.sh ./suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py --log_type="recommend" --crawler="suisuiniannianyingfuqi" --env="dev"  ./suisuiniannianyingfuqi/logs/nohup-recommend.log
+Check the process
+ps aux | grep run_suisuiniannianyingfuqi
+ps aux | grep run_suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

BIN
benshanzhufu/.DS_Store → benshanzhufu/benshanzhufu_recommend/.DS_Store


+ 8 - 0
benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend.py

@@ -85,15 +85,23 @@ class BenshanzhufuRecommend:
             r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
             if r.status_code != 200:
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
+                cls.visitor_key = ""
+                cls.page = 1
                 return
             elif r.json()['message'] != "list success":
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                cls.visitor_key = ""
+                cls.page = 1
                 return
             elif "data" not in r.json():
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                cls.visitor_key = ""
+                cls.page = 1
                 return
             elif len(r.json()['data']["list"]) == 0:
                 Common.logger(log_type, crawler).info(f"没有更多数据了~ {r.json()}\n")
+                cls.visitor_key = ""
+                cls.page = 1
                 return
             else:
                 # 翻页
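
Each of the four new early-return branches above repeats the same two-line reset of `visitor_key` and `page`. As a side note, the same guarantee can be written once by wrapping the paging loop in try/finally, since the reset only needs to happen when `get_videoList` returns. The sketch below is illustrative only: `PagedFeed` and `fetch_page` are made-up stand-ins, not classes from this repo.

```python
# Illustrative sketch: reset the paging state once, in a finally block.
class PagedFeed:
    visitor_key = ""
    page = 1

    @classmethod
    def fetch_page(cls, page):
        # stand-in for the real HTTP call: two pages of items, then nothing
        return [f"video-{page}-{i}" for i in range(2)] if page <= 2 else []

    @classmethod
    def get_video_list(cls):
        collected = []
        try:
            while True:
                items = cls.fetch_page(cls.page)
                if not items:  # the "no more data" branch
                    return collected
                collected.extend(items)
                cls.page += 1
        finally:
            # runs exactly once, on whichever branch leaves the loop,
            # so the next call always starts from a clean state
            cls.visitor_key = ""
            cls.page = 1


if __name__ == "__main__":
    print(PagedFeed.get_video_list())  # ['video-1-0', 'video-1-1', 'video-2-0', 'video-2-1']
    print(PagedFeed.page)              # 1 -> paging state was reset for the next run
```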

+ 108 - 2
benshanzhufu/benshanzhufu_recommend/insert.py

@@ -1,9 +1,13 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/4/13
+import json
+import os
+import sys
 import time
 from datetime import date, timedelta
-
+sys.path.append(os.getcwd())
+from common.common import Common
 from common.feishu import Feishu
 from common.scheduling_db import MysqlHelper
 
@@ -69,7 +73,109 @@ class Insert:
         insert_sql = f""" insert into crawler_config(title, source, config) values("本山祝福小程序", "benshanzhufu", "{str_config_dict}") """
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
 
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env):
+        benshanzhufu_sheetid = ['440018']
+        for sheetid in benshanzhufu_sheetid:
+            xiaoniangao_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            for i in range(1, len(xiaoniangao_sheet)):
+            # for i in range(1, 3):
+                if xiaoniangao_sheet[i][5] is None or xiaoniangao_sheet[i][9] is None:
+                    continue
+                video_id = xiaoniangao_sheet[i][8].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace(
+                    "/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                out_user_id = str(xiaoniangao_sheet[i][17])
+                platform = "本山祝福"
+                strategy = "推荐榜爬虫策略"
+                out_video_id = str(xiaoniangao_sheet[i][6])
+                video_title = str(xiaoniangao_sheet[i][7])
+                cover_url = str(xiaoniangao_sheet[i][19])
+                video_url = str(xiaoniangao_sheet[i][20])
+                duration = int(xiaoniangao_sheet[i][13])
+                publish_time = str(xiaoniangao_sheet[i][15]).replace("/", "-")
+                play_cnt = int(xiaoniangao_sheet[i][9])
+                like_cnt = int(xiaoniangao_sheet[i][11])
+                share_cnt = int(xiaoniangao_sheet[i][12])
+                # collection_cnt = 0
+                comment_cnt = int(xiaoniangao_sheet[i][10])
+                user_id = str(xiaoniangao_sheet[i][17])
+                crawler_rule = json.dumps({})
+                width = int(xiaoniangao_sheet[i][14].split("*")[0])
+                height = int(xiaoniangao_sheet[i][14].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"user_id:{user_id}, type:{type(user_id)}")
+                # print(f"out_user_id:{out_user_id}, type:{type(out_user_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"publish_time:{publish_time}, type:{type(publish_time)}")
+                # print(f"play_cnt:{play_cnt}, type:{type(play_cnt)}")
+                # print(f"like_cnt:{like_cnt}, type:{type(like_cnt)}")
+                # print(f"share_cnt:{share_cnt}, type:{type(share_cnt)}")
+                # print(f"comment_cnt:{comment_cnt}, type:{type(comment_cnt)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                else:
+                    # Save the video info to the database
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                        out_user_id,
+                                        platform,
+                                        strategy,
+                                        out_video_id,
+                                        video_title,
+                                        cover_url,
+                                        video_url,
+                                        duration,
+                                        publish_time,
+                                        play_cnt,
+                                        like_cnt,
+                                        share_cnt,
+                                        comment_cnt,
+                                        crawler_rule,
+                                        width,
+                                        height)
+                                        values({video_id},
+                                        "{out_user_id}",
+                                        "{platform}",
+                                        "{strategy}",
+                                        "{out_video_id}",
+                                        "{video_title}",
+                                        "{cover_url}",
+                                        "{video_url}",
+                                        {duration},
+                                        "{publish_time}",
+                                        {play_cnt},
+                                        {like_cnt},
+                                        {share_cnt},
+                                        {comment_cnt},
+                                        '{crawler_rule}',
+                                        {width},
+                                        {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
 
 if __name__ == "__main__":
     # Insert.insert_config("insert", "benshanzhufu", "dev")
-    print(Insert.get_config("insert", "benshanzhufu", "filter", "dev"))
+    # print(Insert.get_config("insert", "benshanzhufu", "filter", "dev"))
+    Insert.insert_video_from_feishu_to_mysql("insert-prod", "benshanzhufu", "prod")
+    pass
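
A note on the insert above: `insert_video_from_feishu_to_mysql` interpolates the sheet values into the SQL with an f-string, so a title containing a double quote can break the statement. Below is a hedged sketch of the same insert using placeholders; it assumes a plain `pymysql` connection rather than the repo's `MysqlHelper` (whose parameter-passing support isn't shown in this diff), and `insert_crawler_video` is an illustrative name.

```python
import pymysql

# Sketch only: a direct pymysql connection is an assumption; the repo routes SQL
# through common.scheduling_db.MysqlHelper, whose parameter support isn't shown here.
def insert_crawler_video(conn, video):
    sql = ("insert into crawler_video "
           "(video_id, out_user_id, platform, strategy, out_video_id, video_title, "
           "cover_url, video_url, duration, publish_time, play_cnt, like_cnt, "
           "share_cnt, comment_cnt, crawler_rule, width, height) "
           "values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    with conn.cursor() as cursor:
        # placeholders let the driver handle quoting, so titles with quotes are safe
        cursor.execute(sql, (
            video["video_id"], video["out_user_id"], video["platform"],
            video["strategy"], video["out_video_id"], video["video_title"],
            video["cover_url"], video["video_url"], video["duration"],
            video["publish_time"], video["play_cnt"], video["like_cnt"],
            video["share_cnt"], video["comment_cnt"], video["crawler_rule"],
            video["width"], video["height"],
        ))
    conn.commit()
```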

+ 4 - 0
common/publish.py

@@ -200,6 +200,10 @@ class Publish:
             uids_prod_gongzhonghao_follow = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]
             return random.choice(uids_prod_gongzhonghao_follow)
 
+        elif crawler == 'suisuiniannianyingfuqi' and env == 'prod' and strategy == '推荐榜爬虫策略':
+            uids_prod_suisuiniannianyingfuqi_recommend = [26117547, 26117548, 26117549, 26117550, 26117551]
+            return random.choice(uids_prod_suisuiniannianyingfuqi_recommend)
+
         else:
             return our_uid
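
The new branch follows the existing pattern in `Publish`: a fixed uid pool per crawler/strategy on prod, sampled with `random.choice`. For reference, the same selection can be expressed as a lookup table keyed by (crawler, env, strategy). This is only a sketch: `UID_POOLS` and `pick_uid` are not names from the repo, and the uid list is the one added in this commit.

```python
import random

# Illustrative sketch of the uid selection as a lookup table.
UID_POOLS = {
    ("suisuiniannianyingfuqi", "prod", "推荐榜爬虫策略"): [26117547, 26117548, 26117549, 26117550, 26117551],
}

def pick_uid(crawler, env, strategy, our_uid):
    pool = UID_POOLS.get((crawler, env, strategy))
    # fall back to the caller-supplied uid, like the original else branch
    return random.choice(pool) if pool else our_uid

if __name__ == "__main__":
    print(pick_uid("suisuiniannianyingfuqi", "prod", "推荐榜爬虫策略", our_uid=0))
```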
 

+ 31 - 0
main/process.sh

@@ -221,6 +221,37 @@ else
 fi
 
 
+# Benshanzhufu mini-program crawler
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 本山祝福小程序爬虫 进程状态" >> ${log_path}
+ps -ef | grep "run_benshanzhufu" | grep -v "grep"
+if [ "$?" -eq 1 ];then
+  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+  if [ ${env} = "dev" ];then
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend.py --log_type="recommend" --crawler="benshanzhufu" --env="dev" benshanzhufu/logs/nohup-recommend.log
+  else
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend.py --log_type="recommend" --crawler="benshanzhufu" --env="prod"  benshanzhufu/logs/nohup-recommend.log
+  fi
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 本山祝福小程序爬虫 进程状态正常" >> ${log_path}
+fi
+
+# Suisuiniannianyingfuqi mini-program crawler
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 岁岁年年迎福气小程序爬虫 进程状态" >> ${log_path}
+ps -ef | grep "run_suisuiniannianyingfuqi" | grep -v "grep"
+if [ "$?" -eq 1 ];then
+  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+  if [ ${env} = "dev" ];then
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py --log_type="recommend" --crawler="suisuiniannianyingfuqi" --env="dev" suisuiniannianyingfuqi/logs/nohup-recommend.log
+  else
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py --log_type="recommend" --crawler="suisuiniannianyingfuqi" --env="prod"  suisuiniannianyingfuqi/logs/nohup-recommend.log
+  fi
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 岁岁年年迎福气小程序爬虫 进程状态正常" >> ${log_path}
+fi
+
+
 # 删除日志
 echo "$(date "+%Y-%m-%d %H:%M:%S") 开始清理 5 天前的日志文件" >> ${log_path}
 find ${piaoquan_crawler_dir}main/main_logs/ -mtime +5 -name "*.log" -exec rm -rf {} \;

BIN
suisuiniannianyingfuqi/.DS_Store


+ 3 - 0
suisuiniannianyingfuqi/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

BIN
suisuiniannianyingfuqi/logs/.DS_Store


+ 3 - 0
suisuiniannianyingfuqi/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 3 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 28 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from suisuiniannianyingfuqi.suisuiniannianyingfuqi_recommend.suisuiniannianyingfuqi_recommend import SuisuiniannianyingfuqiRecommend
+
+def main(log_type, crawler, env):
+    if env == "dev":
+        oss_endpoint = "out"
+    else:
+        oss_endpoint = "inner"
+    Common.logger(log_type, crawler).info('开始抓取 岁岁年年迎福气小程序\n')
+    SuisuiniannianyingfuqiRecommend.get_videoList(log_type, crawler, oss_endpoint, env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # declare an argument and its type
+    parser.add_argument('--crawler')  # declare an argument
+    parser.add_argument('--env')  # declare an argument
+    args = parser.parse_args()  # parse the arguments (can also be supplied on the command line)
+    main(log_type=args.log_type, crawler=args.crawler, env=args.env)

+ 3 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 160 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/insert.py

@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import sys
+import time
+from datetime import date, timedelta
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
+
+
+class Insert:
+    @classmethod
+    def get_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="suisuiniannianyingfuqi" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def before_day(cls):
+        publish_time_str_rule = (date.today() + timedelta(days=-30)).strftime("%Y-%m-%d %H:%M:%S")
+        publish_time_stamp_rule = int(time.mktime(time.strptime(publish_time_str_rule, "%Y-%m-%d %H:%M:%S")))
+        print(publish_time_str_rule)
+        print(publish_time_stamp_rule)
+
+    @classmethod
+    def insert_config(cls, log_type, crawler, env):
+        filter_sheet = Feishu.get_values_batch(log_type, crawler, "DjXfqG")
+        # title_sheet = Feishu.get_values_batch(log_type, crawler, "bHSW1p")
+        filter_list = []
+        # title_list = []
+        for x in filter_sheet:
+            for y in x:
+                if y is None:
+                    pass
+                else:
+                    filter_list.append(y)
+        # for x in title_sheet:
+        #     for y in x:
+        #         if y is None:
+        #             pass
+        #         else:
+        #             title_list.append(y)
+        # str_title = ','.join(title_list)
+        str_filter = ','.join(filter_list)
+        config_dict = {
+            # "title": str_title,
+            "filter": str_filter
+        }
+        str_config_dict = str(config_dict)
+        # print(f"config_dict:{config_dict}")
+        # print(f"str_config_dict:{str_config_dict}")
+        insert_sql = f""" insert into crawler_config(title, source, config) values("岁岁年年迎福气小程序", "suisuiniannianyingfuqi", "{str_config_dict}") """
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env):
+        suisuiniannianyingfuqi_sheetid = ['290bae']
+        for sheetid in suisuiniannianyingfuqi_sheetid:
+            xiaoniangao_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            for i in range(1, len(xiaoniangao_sheet)):
+            # for i in range(1, 3):
+                if xiaoniangao_sheet[i][5] is None or xiaoniangao_sheet[i][9] is None:
+                    continue
+                video_id = xiaoniangao_sheet[i][9].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace(
+                    "/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                platform = "岁岁年年迎福气"
+                strategy = "推荐榜爬虫策略"
+                out_video_id = str(xiaoniangao_sheet[i][8])
+                video_title = str(xiaoniangao_sheet[i][7])
+                cover_url = str(xiaoniangao_sheet[i][13])
+                video_url = str(xiaoniangao_sheet[i][14])
+                duration = int(xiaoniangao_sheet[i][11])
+                play_cnt = int(xiaoniangao_sheet[i][10])
+                crawler_rule = json.dumps({})
+                width = int(xiaoniangao_sheet[i][12].split("*")[0])
+                height = int(xiaoniangao_sheet[i][12].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"play_cnt:{play_cnt}, type:{type(play_cnt)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                else:
+                    # Save the video info to the database
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                        out_user_id,
+                                        platform,
+                                        strategy,
+                                        out_video_id,
+                                        video_title,
+                                        cover_url,
+                                        video_url,
+                                        duration,
+                                        play_cnt,
+                                        crawler_rule,
+                                        width,
+                                        height)
+                                        values({video_id},
+                                        "suisuiniannianyingfuqi",
+                                        "{platform}",
+                                        "{strategy}",
+                                        "{out_video_id}",
+                                        "{video_title}",
+                                        "{cover_url}",
+                                        "{video_url}",
+                                        {duration},
+                                        {play_cnt},
+                                        '{crawler_rule}',
+                                        {width},
+                                        {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
+if __name__ == "__main__":
+    # Insert.insert_config("insert", "suisuiniannianyingfuqi", "dev")
+    # print(Insert.get_config("insert", "suisuiniannianyingfuqi", "filter", "dev"))
+    # Insert.insert_video_from_feishu_to_mysql("insert-dev", "suisuiniannianyingfuqi", "dev")
+    Insert.insert_video_from_feishu_to_mysql("insert-prod", "suisuiniannianyingfuqi", "prod")
+    pass
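
One more note on this file: `get_config` parses the stored config string with `eval`. Because `insert_config` writes the value as `str(config_dict)`, i.e. a plain dict literal, `ast.literal_eval` can parse the same string without executing arbitrary code. A minimal, self-contained sketch:

```python
import ast

# Sketch only: round-trips a config dict the way insert_config stores it.
config_dict = {"filter": "word1,word2,word3"}
stored = str(config_dict)          # what insert_config writes to crawler_config.config

parsed = ast.literal_eval(stored)  # safe replacement for eval(config)
filter_list = parsed["filter"].split(",")
print(filter_list)                 # ['word1', 'word2', 'word3']
```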

+ 207 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend.py

@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+
+
+class SuisuiniannianyingfuqiRecommend:
+    page = 0
+    platform = "岁岁年年迎福气"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="岁岁年年迎福气" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, oss_endpoint, env):
+        while True:
+            # try:
+            url = 'https://www.jzkksp.com/index/home/get_home_list.html'
+            headers = {
+                'content-type': 'application/x-www-form-urlencoded',
+                'Accept-Encoding': 'gzip,compress,br,deflate',
+                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
+                              'AppleWebKit/605.1.15 (KHTML, like Gecko) '
+                              'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
+                'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
+            }
+            cls.page += 1
+            data = {
+                'token': '851ae159fd33f955bf433e7c47a4a298',
+                'time': '1667905857000',
+                'str_data': 'uT551tU8',
+                'page': str(cls.page),
+                'limit': '10',
+                'appid': 'wxd4c54f60812f6f36',
+                'version': '1.4.1',
+                'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
+            }
+            urllib3.disable_warnings()
+            response = requests.post(url=url, headers=headers, data=data, verify=False)
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
+                cls.page = 0
+                return
+            if 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
+                cls.page = 0
+                return
+            elif len(response.json()['data']['video_list']['data']) == 0:
+                Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
+                cls.page = 0
+                return
+            else:
+                feeds = response.json()['data']['video_list']['data']
+                # Common.logger(log_type, crawler).info('page:{}\n', cls.page)
+                for i in range(len(feeds)):
+                    video_title = feeds[i].get('title', "").replace("'", "").replace('"', '')
+                    video_id = str(feeds[i].get('id', ''))
+                    play_cnt = feeds[i].get('browse', 0)
+                    comment_cnt = 0
+                    like_cnt = 0
+                    share_cnt = 0
+                    publish_time_str = feeds[i].get('createtime', '')
+                    publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+                    user_name = "岁岁年年迎福气"
+                    user_id = "suisuiniannianyingfuqi"
+                    cover_url = feeds[i].get('thumb', '')
+                    video_url = feeds[i].get('url', '')
+
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': comment_cnt,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': share_cnt,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': user_id,
+                                  'avatar_url': cover_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if video_id == '' or video_title == '' or cover_url == '' or video_url == '':
+                        Common.logger(log_type, crawler).info('无效视频\n')
+                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
+
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error('get_feeds异常:{}\n', e)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
+        # try:
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save the video info to a txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐榜爬虫策略",
+                                                  our_uid="recommend",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == 'dev':
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # Delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        # Write the video info to Feishu
+        Feishu.insert_columns(log_type, crawler, "290bae", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "290bae", "F2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+        rule_dict = {}
+        # Save the video info to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐榜爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
+
+
+if __name__ == '__main__':
+    SuisuiniannianyingfuqiRecommend.get_videoList('recommend', 'suisuiniannianyingfuqi', 'out', 'dev')

BIN
suisuiniannianyingfuqi/videos/.DS_Store