wangkun, 2 years ago
Parent
Commit
aaabe3a0bc
2 files changed, with 312 additions and 305 deletions
  1. README.MD (+10 -6)
  2. xiaoniangao/xiaoniangao_hour/xiaoniangao_hour.py (+302 -299)

+ 10 - 6
README.MD

@@ -104,20 +104,24 @@ ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 #### Xiaoniangao
 ```commandline
 Alibaba Cloud server 102
-Targeted (follow) crawler strategy: sh ./main/shceduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="prod"  xiaoniangao/nohup-follow.log
-Hourly ranking crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup-hour.log
-Play-count ranking crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="hour" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup-play.log
+Targeted (follow) crawler strategy: ps aux | grep run_xiaoniangao_follow | grep -v grep | awk '{print $2}' | xargs kill -9 && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="prod"  xiaoniangao/nohup-follow.log
+Hourly ranking crawler strategy: ps aux | grep run_xiaoniangao_hour | grep -v grep | awk '{print $2}' | xargs kill -9 && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup-hour.log
+Play-count ranking crawler strategy: ps aux | grep run_xiaoniangao_play | grep -v grep | awk '{print $2}' | xargs kill -9 && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup-play.log
 
 Local debugging
 Targeted (follow) crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-follow.log
 Hourly ranking crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-hour.log
-Play-count ranking crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="hour" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-play.log
+Play-count ranking crawler strategy: sh ./main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-play.log
 
 nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_1.py >> xiaoniangao/nohup-1.log 2>&1 &
 nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_2.py >> xiaoniangao/nohup-1.log 2>&1 &
 nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_3.py >> xiaoniangao/nohup-1.log 2>&1 &
 
 Kill-process commands
-ps aux | grep run_xiaoniangao
-ps aux | grep run_xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_xiaoniangao_follow
+ps aux | grep run_xiaoniangao_hour
+ps aux | grep run_xiaoniangao_play
+ps aux | grep run_xiaoniangao_follow | grep -v grep | awk '{print $2}' | xargs kill -9 
+ps aux | grep run_xiaoniangao_hour | grep -v grep | awk '{print $2}' | xargs kill -9 
+ps aux | grep run_xiaoniangao_play | grep -v grep | awk '{print $2}' | xargs kill -9 
 ```
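
Every production one-liner above follows the same kill-and-restart pattern: find the runner's PID by script name, `kill -9` it, then relaunch it through the scheduler script. A minimal Python sketch of the find-and-kill half, for orientation only (the repo itself does this in shell; the helper name is illustrative):

```python
import os
import signal
import subprocess

def kill_by_keyword(keyword: str) -> None:
    """Rough equivalent of:
    ps aux | grep <keyword> | grep -v grep | awk '{print $2}' | xargs kill -9"""
    ps_out = subprocess.run(["ps", "axo", "pid,command"],
                            capture_output=True, text=True).stdout
    for line in ps_out.splitlines()[1:]:  # skip the "PID COMMAND" header row
        pid_str, _, command = line.strip().partition(" ")
        # note: would also match this script itself if its argv contains the keyword
        if keyword in command and "grep" not in command:
            try:
                os.kill(int(pid_str), signal.SIGKILL)  # kill -9
            except ProcessLookupError:
                pass  # process exited between listing and killing

# Usage (hypothetical): kill_by_keyword("run_xiaoniangao_hour")
```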

+ 302 - 299
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour.py

@@ -119,268 +119,268 @@ class XiaoniangaoHour:
     # Fetch the video list
     @classmethod
     def get_videoList(cls, log_type, crawler, env):
-        # try:
-        uid_token_dict = cls.get_uid_token()
-        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
-        headers = {
-            # "x-b3-traceid": cls.hour_x_b3_traceid,
-            "x-b3-traceid": '1c403a4aa72e3c',
-            # "X-Token-Id": cls.hour_x_token_id,
-            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
-            # "uid": cls.hour_uid,
-            "uid": uid_token_dict['uid'],
-            "content-type": "application/json",
-            "Accept-Encoding": "gzip,compress,br,deflate",
-            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
-                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
-                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
-            # "Referer": cls.hour_referer
-            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
-        }
-        data = {
-        "log_params": {
-            "page": "discover_rec",
-            "common": {
-                "brand": "iPhone",
-                "device": "iPhone 11",
-                "os": "iOS 14.7.1",
-                "weixinver": "8.0.20",
-                "srcver": "2.24.2",
-                "net": "wifi",
-                "scene": 1089
+        try:
+            uid_token_dict = cls.get_uid_token()
+            url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+            headers = {
+                # "x-b3-traceid": cls.hour_x_b3_traceid,
+                "x-b3-traceid": '1c403a4aa72e3c',
+                # "X-Token-Id": cls.hour_x_token_id,
+                "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
+                # "uid": cls.hour_uid,
+                "uid": uid_token_dict['uid'],
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                              'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+                # "Referer": cls.hour_referer
+                "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
             }
-        },
-        "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
-        "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
-        "share_width": 625,
-        "share_height": 500,
-        "ext": {
-            "fmid": 0,
-            "items": {}
-        },
-        "app": "xng",
-        "rec_scene": "discover_rec",
-        "log_common_params": {
-            "e": [{
-                "data": {
-                    "page": "discoverIndexPage",
-                    "topic": "recommend"
-                },
-                "ab": {}
-            }],
+            data = {
+            "log_params": {
+                "page": "discover_rec",
+                "common": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.2",
+                    "net": "wifi",
+                    "scene": 1089
+                }
+            },
+            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+            "share_width": 625,
+            "share_height": 500,
             "ext": {
-                "brand": "iPhone",
-                "device": "iPhone 11",
-                "os": "iOS 14.7.1",
-                "weixinver": "8.0.20",
-                "srcver": "2.24.3",
-                "net": "wifi",
-                "scene": "1089"
+                "fmid": 0,
+                "items": {}
             },
-            "pj": "1",
-            "pf": "2",
-            "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
-        },
-        "refresh": False,
-        "token": uid_token_dict["token"],
-        "uid": uid_token_dict["uid"],
-        "proj": "ma",
-        "wx_ver": "8.0.20",
-        "code_ver": "3.62.0"
-    }
-
-        urllib3.disable_warnings()
-        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
-        if 'data' not in r.text or r.status_code != 200:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-        elif "data" not in r.json():
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
-        elif "list" not in r.json()["data"]:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
-        elif len(r.json()['data']['list']) == 0:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
-        else:
-            # video list data
-            feeds = r.json()["data"]["list"]
-            for i in range(len(feeds)):
-                # Title: an emoji is randomly added at the head or tail, or replaces punctuation inside the sentence
-                if "title" in feeds[i]:
-                    befor_video_title = feeds[i]["title"].strip().replace("\n", "") \
-                        .replace("/", "").replace("\r", "").replace("#", "") \
-                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
-                        .replace(":", "").replace("*", "").replace("?", "") \
-                        .replace("?", "").replace('"', "").replace("<", "") \
-                        .replace(">", "").replace("|", "").replace(" ", "").replace("#表情", "").replace("#符号", "")
-
-                    expression = cls.get_expression()
-                    expression_list = expression[0]
-                    char_list = expression[1]
-                    # pick one emoji at random
-                    expression = random.choice(expression_list)
-                    # build a title list: [emoji + title, title + emoji]
-                    expression_title_list = [expression + befor_video_title, befor_video_title + expression]
-                    # randomly pick one title from that list
-                    title_list1 = random.choice(expression_title_list)
-                    # build a title: original title + punctuation character
-                    title_list2 = befor_video_title + random.choice(char_list)
-                    # gather the emoji+title and title+punctuation variants into the candidate title list
-                    title_list4 = [title_list2, title_list1]
-                    # final title
-                    video_title = random.choice(title_list4)
-                else:
-                    video_title = 0
+            "app": "xng",
+            "rec_scene": "discover_rec",
+            "log_common_params": {
+                "e": [{
+                    "data": {
+                        "page": "discoverIndexPage",
+                        "topic": "recommend"
+                    },
+                    "ab": {}
+                }],
+                "ext": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.3",
+                    "net": "wifi",
+                    "scene": "1089"
+                },
+                "pj": "1",
+                "pf": "2",
+                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+            },
+            "refresh": False,
+            "token": uid_token_dict["token"],
+            "uid": uid_token_dict["uid"],
+            "proj": "ma",
+            "wx_ver": "8.0.20",
+            "code_ver": "3.62.0"
+        }
 
-                # video ID
-                if "vid" in feeds[i]:
-                    video_id = feeds[i]["vid"]
-                else:
-                    video_id = 0
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+            if 'data' not in r.text or r.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+            elif "data" not in r.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
+            elif "list" not in r.json()["data"]:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
+            elif len(r.json()['data']['list']) == 0:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
+            else:
+                # video list data
+                feeds = r.json()["data"]["list"]
+                for i in range(len(feeds)):
+                    # Title: an emoji is randomly added at the head or tail, or replaces punctuation inside the sentence
+                    if "title" in feeds[i]:
+                        befor_video_title = feeds[i]["title"].strip().replace("\n", "") \
+                            .replace("/", "").replace("\r", "").replace("#", "") \
+                            .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "").replace("#表情", "").replace("#符号", "")
+
+                        expression = cls.get_expression()
+                        expression_list = expression[0]
+                        char_list = expression[1]
+                        # pick one emoji at random
+                        expression = random.choice(expression_list)
+                        # build a title list: [emoji + title, title + emoji]
+                        expression_title_list = [expression + befor_video_title, befor_video_title + expression]
+                        # randomly pick one title from that list
+                        title_list1 = random.choice(expression_title_list)
+                        # build a title: original title + punctuation character
+                        title_list2 = befor_video_title + random.choice(char_list)
+                        # gather the emoji+title and title+punctuation variants into the candidate title list
+                        title_list4 = [title_list2, title_list1]
+                        # final title
+                        video_title = random.choice(title_list4)
+                    else:
+                        video_title = 0
 
-                # play count
-                if "play_pv" in feeds[i]:
-                    video_play_cnt = feeds[i]["play_pv"]
-                else:
-                    video_play_cnt = 0
+                    # video ID
+                    if "vid" in feeds[i]:
+                        video_id = feeds[i]["vid"]
+                    else:
+                        video_id = 0
 
-                # like count
-                if "favor" in feeds[i]:
-                    video_like_cnt = feeds[i]["favor"]["total"]
-                else:
-                    video_like_cnt = 0
+                    # play count
+                    if "play_pv" in feeds[i]:
+                        video_play_cnt = feeds[i]["play_pv"]
+                    else:
+                        video_play_cnt = 0
 
-                # comment count
-                if "comment_count" in feeds[i]:
-                    video_comment_cnt = feeds[i]["comment_count"]
-                else:
-                    video_comment_cnt = 0
+                    # like count
+                    if "favor" in feeds[i]:
+                        video_like_cnt = feeds[i]["favor"]["total"]
+                    else:
+                        video_like_cnt = 0
 
-                # share count
-                if "share" in feeds[i]:
-                    video_share_cnt = feeds[i]["share"]
-                else:
-                    video_share_cnt = 0
+                    # comment count
+                    if "comment_count" in feeds[i]:
+                        video_comment_cnt = feeds[i]["comment_count"]
+                    else:
+                        video_comment_cnt = 0
 
-                # duration
-                if "du" in feeds[i]:
-                    video_duration = int(feeds[i]["du"] / 1000)
-                else:
-                    video_duration = 0
+                    # share count
+                    if "share" in feeds[i]:
+                        video_share_cnt = feeds[i]["share"]
+                    else:
+                        video_share_cnt = 0
 
-                # width and height
-                if "w" in feeds[i] and "h" in feeds[i]:
-                    video_width = feeds[i]["w"]
-                    video_height = feeds[i]["h"]
-                else:
-                    video_width = 0
-                    video_height = 0
+                    # duration
+                    if "du" in feeds[i]:
+                        video_duration = int(feeds[i]["du"] / 1000)
+                    else:
+                        video_duration = 0
 
-                # publish time
-                if "t" in feeds[i]:
-                    video_send_time = feeds[i]["t"]
-                else:
-                    video_send_time = 0
-                publish_time_stamp = int(int(video_send_time)/1000)
-                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # width and height
+                    if "w" in feeds[i] and "h" in feeds[i]:
+                        video_width = feeds[i]["w"]
+                        video_height = feeds[i]["h"]
+                    else:
+                        video_width = 0
+                        video_height = 0
 
-                # user nickname / avatar
-                if "user" in feeds[i]:
-                    user_name = feeds[i]["user"]["nick"].strip().replace("\n", "") \
-                        .replace("/", "").replace("快手", "").replace(" ", "") \
-                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                    head_url = feeds[i]["user"]["hurl"]
-                else:
-                    user_name = 0
-                    head_url = 0
+                    # publish time
+                    if "t" in feeds[i]:
+                        video_send_time = feeds[i]["t"]
+                    else:
+                        video_send_time = 0
+                    publish_time_stamp = int(int(video_send_time)/1000)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                    # user nickname / avatar
+                    if "user" in feeds[i]:
+                        user_name = feeds[i]["user"]["nick"].strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                        head_url = feeds[i]["user"]["hurl"]
+                    else:
+                        user_name = 0
+                        head_url = 0
 
-                # user ID
-                profile_id = feeds[i]["id"]
+                    # user ID
+                    profile_id = feeds[i]["id"]
 
-                # user mid
-                profile_mid = feeds[i]["user"]["mid"]
+                    # user mid
+                    profile_mid = feeds[i]["user"]["mid"]
 
-                # video cover
-                if "url" in feeds[i]:
-                    cover_url = feeds[i]["url"]
-                else:
-                    cover_url = 0
+                    # video cover
+                    if "url" in feeds[i]:
+                        cover_url = feeds[i]["url"]
+                    else:
+                        cover_url = 0
 
-                # video playback URL
-                if "v_url" in feeds[i]:
-                    video_url = feeds[i]["v_url"]
-                else:
-                    video_url = 0
-
-                video_dict = {
-                    "video_title": video_title,
-                    "video_id": video_id,
-                    "duration": video_duration,
-                    "play_cnt": video_play_cnt,
-                    "like_cnt": video_like_cnt,
-                    "comment_cnt": video_comment_cnt,
-                    "share_cnt": video_share_cnt,
-                    "user_name": user_name,
-                    "publish_time_stamp": publish_time_stamp,
-                    "publish_time_str": publish_time_str,
-                    "video_width": video_width,
-                    "video_height": video_height,
-                    "avatar_url": head_url,
-                    "profile_id": profile_id,
-                    "profile_mid": profile_mid,
-                    "cover_url": cover_url,
-                    "video_url": video_url,
-                    "session": f"xiaoniangao-hour-{int(time.time())}"
-                }
-                for k, v in video_dict.items():
-                    Common.logger(log_type, crawler).info(f"{k}:{v}")
-
-                # filter out invalid videos
-                if video_title == 0 or video_id == 0 or video_duration == 0 \
-                        or video_send_time == 0 or user_name == 0 or head_url == 0 \
-                        or cover_url == 0 or video_url == 0:
-                    Common.logger(log_type, crawler).warning("无效视频\n")
-                # filter by basic crawl rules
-                elif cls.download_rule(video_dict) is False:
-                    Common.logger(log_type, crawler).info("不满足基础门槛规则\n")
-                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                    Common.logger(log_type, crawler).info('视频已下载\n')
-                # filter sensitive words
-                elif any(str(word) if str(word) in video_title else False for word in cls.filter_words(log_type, crawler)) is True:
-                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
-                    time.sleep(1)
-                else:
-                    # write to the Feishu hourly-level feeds database table
-                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
-                    profile_mid,
-                    platform,
-                    out_video_id,
-                    video_title,
-                    user_name,
-                    cover_url,
-                    video_url,
-                    duration,
-                    publish_time,
-                    play_cnt,
-                    crawler_time_stamp,
-                    crawler_time)
-                    values({profile_id},
-                    {profile_mid},
-                    "{cls.platform}",
-                    "{video_id}",
-                    "{video_title}",
-                    "{user_name}",
-                    "{cover_url}",
-                    "{video_url}",
-                    {video_duration},
-                    "{publish_time_str}",
-                    {video_play_cnt},
-                    {int(time.time())},
-                    "{time.strftime("%Y-%y-%d %H:%M:%S", time.localtime(int(time.time())))}"
-                    )"""
-                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+                    # video playback URL
+                    if "v_url" in feeds[i]:
+                        video_url = feeds[i]["v_url"]
+                    else:
+                        video_url = 0
+
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": video_id,
+                        "duration": video_duration,
+                        "play_cnt": video_play_cnt,
+                        "like_cnt": video_like_cnt,
+                        "comment_cnt": video_comment_cnt,
+                        "share_cnt": video_share_cnt,
+                        "user_name": user_name,
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "video_width": video_width,
+                        "video_height": video_height,
+                        "avatar_url": head_url,
+                        "profile_id": profile_id,
+                        "profile_mid": profile_mid,
+                        "cover_url": cover_url,
+                        "video_url": video_url,
+                        "session": f"xiaoniangao-hour-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    # filter out invalid videos
+                    if video_title == 0 or video_id == 0 or video_duration == 0 \
+                            or video_send_time == 0 or user_name == 0 or head_url == 0 \
+                            or cover_url == 0 or video_url == 0:
+                        Common.logger(log_type, crawler).warning("无效视频\n")
+                    # filter by basic crawl rules
+                    elif cls.download_rule(video_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足基础门槛规则\n")
+                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    # filter sensitive words
+                    elif any(str(word) if str(word) in video_title else False for word in cls.filter_words(log_type, crawler)) is True:
+                        Common.logger(log_type, crawler).info("视频已中过滤词\n")
+                        time.sleep(1)
+                    else:
+                        # write to the Feishu hourly-level feeds database table
+                        insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
+                        profile_mid,
+                        platform,
+                        out_video_id,
+                        video_title,
+                        user_name,
+                        cover_url,
+                        video_url,
+                        duration,
+                        publish_time,
+                        play_cnt,
+                        crawler_time_stamp,
+                        crawler_time)
+                        values({profile_id},
+                        {profile_mid},
+                        "{cls.platform}",
+                        "{video_id}",
+                        "{video_title}",
+                        "{user_name}",
+                        "{cover_url}",
+                        "{video_url}",
+                        {video_duration},
+                        "{publish_time_str}",
+                        {video_play_cnt},
+                        {int(time.time())},
+                        "{time.strftime("%Y-%y-%d %H:%M:%S", time.localtime(int(time.time())))}"
+                        )"""
+                        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
 
     @classmethod
     def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
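
Aside from moving the method body into a try/except, the hunk above leaves get_videoList's logic unchanged. Its densest step is the title mutation, so here is a standalone sketch of just that step, assuming emoji and punctuation lists of the shape get_expression() returns (its body is outside this diff):

```python
import random

def decorate_title(title: str, emoji_list: list, char_list: list) -> str:
    """Mirrors the title logic in get_videoList: randomly pick one of
    [emoji + title, title + emoji], or title + punctuation character."""
    emoji = random.choice(emoji_list)
    emoji_variant = random.choice([emoji + title, title + emoji])  # title_list1
    char_variant = title + random.choice(char_list)                # title_list2
    return random.choice([char_variant, emoji_variant])            # title_list4 pick

# Usage with illustrative data: decorate_title("夕阳真美", ["😊", "🌸"], ["~", "!"])
```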
@@ -492,59 +492,62 @@ class XiaoniangaoHour:
         """
         Update the hourly-ranking data
         """
-        befor_yesterday = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
-        update_time_stamp = int(time.mktime(time.strptime(befor_yesterday, "%Y-%m-%d %H:%M:%S")))
-        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} """
-        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
-        if len(update_video_list) == 0:
-            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
-            return
-        for update_video_info in update_video_list:
-            profile_id = update_video_info["profile_id"]
-            profile_mid = update_video_info["profile_mid"]
-            video_title = update_video_info["video_title"]
-            video_id = update_video_info["out_video_id"]
-            if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <=10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                  crawler=crawler,
-                                                  p_id=profile_id,
-                                                  p_mid=profile_mid,
-                                                  v_title=video_title,
-                                                  v_id=video_id)
-                ten_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id={video_id}; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
-            elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <=10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                     crawler=crawler,
-                                                     p_id=profile_id,
-                                                     p_mid=profile_mid,
-                                                     v_title=video_title,
-                                                     v_id=video_id)
-                fifteen_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"ten_play_cnt:{fifteen_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={fifteen_play_cnt} WHERE out_video_id={video_id}; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
-            elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <=10:
-                video_info_dict = cls.get_video_info(log_type=log_type,
-                                                     crawler=crawler,
-                                                     p_id=profile_id,
-                                                     p_mid=profile_mid,
-                                                     v_title=video_title,
-                                                     v_id=video_id)
-                twenty_play_cnt = video_info_dict['play_cnt']
-                Common.logger(log_type, crawler).info(f"ten_play_cnt:{twenty_play_cnt}")
-                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={twenty_play_cnt} WHERE out_video_id={video_id}; """
-                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
-                MysqlHelper.update_values(log_type, crawler, update_sql, env)
-                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
-            else:
-                pass
+        try:
+            befor_yesterday = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
+            update_time_stamp = int(time.mktime(time.strptime(befor_yesterday, "%Y-%m-%d %H:%M:%S")))
+            select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} """
+            update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+            if len(update_video_list) == 0:
+                Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
+                return
+            for update_video_info in update_video_list:
+                profile_id = update_video_info["profile_id"]
+                profile_mid = update_video_info["profile_mid"]
+                video_title = update_video_info["video_title"]
+                video_id = update_video_info["out_video_id"]
+                if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <=10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                      crawler=crawler,
+                                                      p_id=profile_id,
+                                                      p_mid=profile_mid,
+                                                      v_title=video_title,
+                                                      v_id=video_id)
+                    ten_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id={video_id}; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
+                elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <=10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                         crawler=crawler,
+                                                         p_id=profile_id,
+                                                         p_mid=profile_mid,
+                                                         v_title=video_title,
+                                                         v_id=video_id)
+                    fifteen_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"ten_play_cnt:{fifteen_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={fifteen_play_cnt} WHERE out_video_id={video_id}; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
+                elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <=10:
+                    video_info_dict = cls.get_video_info(log_type=log_type,
+                                                         crawler=crawler,
+                                                         p_id=profile_id,
+                                                         p_mid=profile_mid,
+                                                         v_title=video_title,
+                                                         v_id=video_id)
+                    twenty_play_cnt = video_info_dict['play_cnt']
+                    Common.logger(log_type, crawler).info(f"ten_play_cnt:{twenty_play_cnt}")
+                    update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={twenty_play_cnt} WHERE out_video_id={video_id}; """
+                    # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                    cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
+                else:
+                    pass
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"update_videoList:{e}\n")
 
     @classmethod
     def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
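
The three branches of update_videoList differ only in the hour they fire (10, 15, 20); each re-fetches a video's play count within the first ten minutes of that hour and writes it back. A hedged sketch of the shared gate, folded into one helper (names are illustrative, not repo API):

```python
import datetime
from typing import Optional

SNAPSHOT_HOURS = (10, 15, 20)  # check windows used by update_videoList

def current_snapshot_hour(now: Optional[datetime.datetime] = None) -> Optional[int]:
    """Return 10, 15 or 20 when `now` falls within the first ten minutes
    of a snapshot hour, else None."""
    now = now or datetime.datetime.now()
    if now.hour in SNAPSHOT_HOURS and now.minute <= 10:
        return now.hour
    return None

# Sketch of use inside the loop, replacing the three near-identical branches:
# hour = current_snapshot_hour()
# if hour is not None:
#     play_cnt = cls.get_video_info(...)["play_cnt"]
#     ...single UPDATE plus one download_publish call...
```

This keeps one copy of the get_video_info / UPDATE / download_publish sequence instead of three.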