
update suisuiniannian

wangkun, 1 year ago
Commit 3e9201c31c

+ 2 - 6
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend.py

@@ -9,14 +9,10 @@ from common.common import Common
 from suisuiniannianyingfuqi.suisuiniannianyingfuqi_recommend.suisuiniannianyingfuqi_recommend import SuisuiniannianyingfuqiRecommend
 
 def main(log_type, crawler, env):
-    if env == "dev":
-        oss_endpoint = "out"
-    else:
-        oss_endpoint = "inner"
     Common.logger(log_type, crawler).info('开始抓取 岁岁年年迎福气小程序\n')
-    SuisuiniannianyingfuqiRecommend.get_videoList(log_type, crawler, oss_endpoint, env)
+    SuisuiniannianyingfuqiRecommend.get_videoList(log_type, crawler, env)
     Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
 
 
 if __name__ == "__main__":

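With oss_endpoint no longer resolved inside main(), the entry point now takes only log_type, crawler and env. A minimal invocation sketch under the new signature; the argument values are illustrative, taken from elsewhere in this commit, not a prescribed call:

    main(log_type='recommend', crawler='suisuiniannianyingfuqi', env='dev')
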
+ 102 - 95
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend.py

@@ -3,6 +3,7 @@
 # @Time: 2023/4/13
 import json
 import os
+import random
 import shutil
 import sys
 import time
@@ -14,10 +15,10 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
+# from common.public import download_rule
 
 
 class SuisuiniannianyingfuqiRecommend:
-    page = 0
     platform = "岁岁年年迎福气"
 
     @classmethod
@@ -27,99 +28,94 @@ class SuisuiniannianyingfuqiRecommend:
         return len(repeat_video)
 
     @classmethod
-    def get_videoList(cls, log_type, crawler, oss_endpoint, env):
+    def get_videoList(cls, log_type, crawler, env):
+        page = 1
         while True:
-            # try:
-            url = 'https://www.jzkksp.com/index/home/get_home_list.html'
-            headers = {
-                'content-type': 'application/x-www-form-urlencoded',
-                'Accept-Encoding': 'gzip,compress,br,deflate',
-                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
-                              'AppleWebKit/605.1.15 (KHTML, like Gecko) '
-                              'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
-                'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
-            }
-            cls.page += 1
-            data = {
-                'token': '851ae159fd33f955bf433e7c47a4a298',
-                'time': '1667905857000',
-                'str_data': 'uT551tU8',
-                'page': str(cls.page),
-                'limit': '10',
-                'appid': 'wxd4c54f60812f6f36',
-                'version': '1.4.1',
-                'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
-            }
-            urllib3.disable_warnings()
-            response = requests.post(url=url, headers=headers, data=data, verify=False)
-            if response.status_code != 200:
-                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
-                cls.page = 0
-                return
-            if 'data' not in response.json():
-                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
-                cls.page = 0
-                return
-            elif len(response.json()['data']['video_list']['data']) == 0:
-                Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
-                cls.page = 0
-                return
-            else:
-                feeds = response.json()['data']['video_list']['data']
-                # Common.logger(log_type, crawler).info('page:{}\n', cls.page)
-                for i in range(len(feeds)):
-                    video_title = feeds[i].get('title', "").replace("'", "").replace('"', '')
-                    video_id = str(feeds[i].get('id', ''))
-                    play_cnt = feeds[i].get('browse', 0)
-                    comment_cnt = 0
-                    like_cnt = 0
-                    share_cnt = 0
-                    publish_time_str = feeds[i].get('createtime', '')
-                    publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
-                    user_name = "岁岁年年迎福气"
-                    user_id = "suisuiniannianyingfuqi"
-                    cover_url = feeds[i].get('thumb', '')
-                    video_url = feeds[i].get('url', '')
-
-                    video_dict = {'video_title': video_title,
-                                  'video_id': video_id,
-                                  'play_cnt': play_cnt,
-                                  'comment_cnt': comment_cnt,
-                                  'like_cnt': like_cnt,
-                                  'share_cnt': share_cnt,
-                                  'publish_time_stamp': publish_time_stamp,
-                                  'publish_time_str': publish_time_str,
-                                  'user_name': user_name,
-                                  'user_id': user_id,
-                                  'avatar_url': cover_url,
-                                  'cover_url': cover_url,
-                                  'video_url': video_url,
-                                  'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                    if video_id == '' or video_title == '' or cover_url == '' or video_url == '':
-                        Common.logger(log_type, crawler).info('无效视频\n')
-                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-                    else:
-                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
-
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).error('get_feeds异常:{}\n', e)
+            try:
+                url = 'https://www.jzkksp.com/index/home/get_home_list.html'
+                headers = {
+                    'content-type': 'application/x-www-form-urlencoded',
+                    'Accept-Encoding': 'gzip,compress,br,deflate',
+                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
+                                  'AppleWebKit/605.1.15 (KHTML, like Gecko) '
+                                  'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
+                    'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
+                }
+                data = {
+                    'token': '851ae159fd33f955bf433e7c47a4a298',
+                    'time': '1667905857000',
+                    'str_data': 'uT551tU8',
+                    'page': str(page),
+                    'limit': '10',
+                    'appid': 'wxd4c54f60812f6f36',
+                    'version': '1.4.1',
+                    'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
+                }
+                urllib3.disable_warnings()
+                response = requests.post(url=url, headers=headers, data=data, verify=False)
+                page += 1
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
+                    return
+                elif 'data' not in response.json():
+                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
+                    return
+                elif len(response.json()['data']['video_list']['data']) == 0:
+                    Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
+                    return
+                else:
+                    feeds = response.json()['data']['video_list']['data']
+                    for i in range(len(feeds)):
+                        try:
+                            publish_time_str = feeds[i].get('createtime', '')
+                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+                            video_dict = {'video_title': feeds[i].get('title', "").replace("'", "").replace('"', ''),
+                                          'video_id': str(feeds[i].get('id', '')),
+                                          'play_cnt': feeds[i].get('browse', 0),
+                                          'comment_cnt': 0,
+                                          'like_cnt': 0,
+                                          'share_cnt': 0,
+                                          'publish_time_stamp': publish_time_stamp,
+                                          'publish_time_str': publish_time_str,
+                                          'user_name': "岁岁年年迎福气",
+                                          'user_id': "suisuiniannianyingfuqi",
+                                          'avatar_url': feeds[i].get('thumb', ''),
+                                          'cover_url': feeds[i].get('thumb', ''),
+                                          'video_url': feeds[i].get('url', ''),
+                                          'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                            if video_dict["video_id"] == '' or video_dict["video_title"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
+                                Common.logger(log_type, crawler).info('无效视频\n')
+                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                            else:
+                                cls.download_publish(log_type, crawler, video_dict, env)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
 
 # 下载 / 上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
-        # try:
+    def download_publish(cls, log_type, crawler, video_dict, env):
         # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
             return
+
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
         video_dict["duration"] = ffmpeg_dict["duration"]
         video_dict["video_width"] = ffmpeg_dict["width"]
         video_dict["video_height"] = ffmpeg_dict["height"]
@@ -128,13 +124,24 @@ class SuisuiniannianyingfuqiRecommend:
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
         # 保存视频信息至txt
         Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+        if env == "dev":
+            oss_endpoint = "out"
+        else:
+            oss_endpoint = "inner"
+
+        select_user_sql = f"""select * from crawler_user_v3 where source="suisuiniannianyingfuqi" """
+        user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+        our_uid_list = []
+        for user in user_list:
+            our_uid_list.append(user["uid"])
+        our_uid = random.choice(our_uid_list)
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
         our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                   crawler=crawler,
                                                   strategy="推荐榜爬虫策略",
-                                                  our_uid="recommend",
+                                                  our_uid=our_uid,
                                                   env=env,
                                                   oss_endpoint=oss_endpoint)
         if env == 'dev':
@@ -144,9 +151,12 @@ class SuisuiniannianyingfuqiRecommend:
         Common.logger(log_type, crawler).info("视频上传完成")
 
         if our_video_id is None:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "290bae", "ROWS", 1, 2)
@@ -199,9 +209,6 @@ class SuisuiniannianyingfuqiRecommend:
         MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
         Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
 
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
-
 
 if __name__ == '__main__':
-    SuisuiniannianyingfuqiRecommend.get_videoList('recommend', 'suisuiniannianyingfuqi', 'out', 'dev')
+    pass

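Since the module's __main__ block is now a bare pass, the crawler is driven from run_suisuiniannianyingfuqi_recommend.py. A minimal direct-call sketch with the updated three-argument signature (values are illustrative, matching the ones used in the runner above):

    from suisuiniannianyingfuqi.suisuiniannianyingfuqi_recommend.suisuiniannianyingfuqi_recommend import SuisuiniannianyingfuqiRecommend

    # log_type, crawler, env -- oss_endpoint is now derived inside download_publish()
    SuisuiniannianyingfuqiRecommend.get_videoList('recommend', 'suisuiniannianyingfuqi', 'dev')
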
+ 0 - 3
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend_scheduling.py

@@ -55,15 +55,12 @@ class SuisuiniannianyingfuqiRecommendScheduling:
                 page += 1
                 if response.status_code != 200:
                     Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
-                    cls.page = 0
                     return
                 elif 'data' not in response.json():
                     Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
-                    cls.page = 0
                     return
                 elif len(response.json()['data']['video_list']['data']) == 0:
                     Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
-                    cls.page = 0
                     return
                 else:
                     feeds = response.json()['data']['video_list']['data']