wangkun 1 year ago
parent
commit
f88c8f9609
1 changed file with 274 additions and 278 deletions:
      kuaishou/kuaishou_follow/kuaishou_follow.py

kuaishou/kuaishou_follow/kuaishou_follow.py  (+274, -278)

@@ -317,10 +317,9 @@ class KuaiShouFollow:
             'Accept-Encoding': 'gzip, deflate, br',
             'Connection': 'keep-alive'
         }
-
+        response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
+                                 verify=False, timeout=10)
         try:
-            response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
-                                     verify=False, timeout=10)
             feeds = response.json()['data']['visionProfilePhotoList']['feeds']
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response.text}")
@@ -331,176 +330,175 @@ class KuaiShouFollow:
         pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
         # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
         for i in range(len(feeds)):
-            # if 'photo' not in feeds[i]:
-            #     Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
-            #     break
-
-            # video_title
-            if 'caption' not in feeds[i]['photo']:
-                video_title = random_title(log_type, crawler, env, text='title')
-            elif feeds[i]['photo']['caption'].strip() == "":
-                video_title = random_title(log_type, crawler, env, text='title')
-            else:
-                video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
-
-            if 'videoResource' not in feeds[i]['photo'] \
-                    and 'manifest' not in feeds[i]['photo'] \
-                    and 'manifestH265' not in feeds[i]['photo']:
-                Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                break
-            videoResource = feeds[i]['photo']['videoResource']
-
-            if 'h264' not in videoResource and 'hevc' not in videoResource:
-                Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                break
-
-            # video_id
-            if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                video_id = videoResource['h264']['videoId']
-            elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                video_id = videoResource['hevc']['videoId']
-            else:
-                video_id = ""
-
-            # play_cnt
-            if 'viewCount' not in feeds[i]['photo']:
-                play_cnt = 0
-            else:
-                play_cnt = int(feeds[i]['photo']['viewCount'])
+            try:
+                # video_title
+                if 'caption' not in feeds[i]['photo']:
+                    video_title = random_title(log_type, crawler, env, text='title')
+                elif feeds[i]['photo']['caption'].strip() == "":
+                    video_title = random_title(log_type, crawler, env, text='title')
+                else:
+                    video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
+
+                if 'videoResource' not in feeds[i]['photo'] \
+                        and 'manifest' not in feeds[i]['photo'] \
+                        and 'manifestH265' not in feeds[i]['photo']:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                    break
+                videoResource = feeds[i]['photo']['videoResource']
+
+                if 'h264' not in videoResource and 'hevc' not in videoResource:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                    break
+
+                # video_id
+                if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                    video_id = videoResource['h264']['videoId']
+                elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                    video_id = videoResource['hevc']['videoId']
+                else:
+                    video_id = ""
 
-            # like_cnt
-            if 'realLikeCount' not in feeds[i]['photo']:
-                like_cnt = 0
-            else:
-                like_cnt = feeds[i]['photo']['realLikeCount']
+                # play_cnt
+                if 'viewCount' not in feeds[i]['photo']:
+                    play_cnt = 0
+                else:
+                    play_cnt = int(feeds[i]['photo']['viewCount'])
 
-            # publish_time
-            if 'timestamp' not in feeds[i]['photo']:
-                publish_time_stamp = 0
-                publish_time_str = ''
-                publish_time = 0
-            else:
-                publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
-                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
+                # like_cnt
+                if 'realLikeCount' not in feeds[i]['photo']:
+                    like_cnt = 0
+                else:
+                    like_cnt = feeds[i]['photo']['realLikeCount']
 
-            # duration
-            if 'duration' not in feeds[i]['photo']:
-                duration = 0
-            else:
-                duration = int(int(feeds[i]['photo']['duration']) / 1000)
+                # publish_time
+                if 'timestamp' not in feeds[i]['photo']:
+                    publish_time_stamp = 0
+                    publish_time_str = ''
+                    publish_time = 0
+                else:
+                    publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
 
-            # video_width / video_height / video_url
-            mapping = {}
-            for item in ['width', 'height']:
-                try:
-                    val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                except:
-                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                mapping[item] = val
-            video_width = int(mapping['width']) if mapping['width'] else 0
-            video_height = int(mapping['height']) if mapping['height'] else 0
-            # cover_url
-            if 'coverUrl' not in feeds[i]['photo']:
-                cover_url = ""
-            else:
-                cover_url = feeds[i]['photo']['coverUrl']
-
-            # user_name / avatar_url
-            user_name = feeds[i]['author']['name']
-            avatar_url = feeds[i]['author']['headerUrl']
-
-            video_url = feeds[i]['photo']['photoUrl']
-            video_dict = {'video_title': video_title,
-                          'video_id': video_id,
-                          'play_cnt': play_cnt,
-                          'comment_cnt': 0,
-                          'like_cnt': like_cnt,
-                          'share_cnt': 0,
-                          'video_width': video_width,
-                          'video_height': video_height,
-                          'duration': duration,
-                          'publish_time': publish_time,
-                          'publish_time_stamp': publish_time_stamp,
-                          'publish_time_str': publish_time_str,
-                          'user_name': user_name,
-                          'user_id': out_uid,
-                          'avatar_url': avatar_url,
-                          'cover_url': cover_url,
-                          'video_url': video_url,
-                          'session': f"kuaishou{int(time.time())}"}
-
-            rule_1 = cls.download_rule(video_dict, rule_dict_1)
-            Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-            Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-            Common.logger(log_type, crawler).info(
-                f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-            Common.logger(log_type, crawler).info(
-                f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-            Common.logger(log_type, crawler).info(
-                f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-            Common.logger(log_type, crawler).info(
-                f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-            Common.logger(log_type, crawler).info(
-                f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-            Common.logger(log_type, crawler).info(
-                f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-            Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-            rule_2 = cls.download_rule(video_dict, rule_dict_2)
-            Common.logger(log_type, crawler).info(
-                f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-            Common.logger(log_type, crawler).info(
-                f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-            Common.logger(log_type, crawler).info(
-                f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-            Common.logger(log_type, crawler).info(
-                f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-            Common.logger(log_type, crawler).info(
-                f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-            Common.logger(log_type, crawler).info(
-                f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-            Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
-
-            if video_title == "" or video_url == "":
-                Common.logger(log_type, crawler).info("Invalid video\n")
-                continue
-            elif rule_1 is True:
-                if download_cnt_1 < int(
-                        rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                      "")[
-                            -1]):
-                    cls.download_publish(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy=strategy,
-                                         video_dict=video_dict,
-                                         rule_dict=rule_dict_1,
-                                         our_uid=our_uid,
-                                         oss_endpoint=oss_endpoint,
-                                         env=env,
-                                         machine=machine)
-                    # if download_finished is True:
-                    #     download_cnt_1 += 1
-            elif rule_2 is True:
-                if download_cnt_2 < int(
-                        rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                      "")[
-                            -1]):
-                    cls.download_publish(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy=strategy,
-                                         video_dict=video_dict,
-                                         rule_dict=rule_dict_2,
-                                         our_uid=our_uid,
-                                         oss_endpoint=oss_endpoint,
-                                         env=env,
-                                         machine=machine)
-                    # if download_finished is True:
-                    #     download_cnt_2 += 1
-            else:
-                Common.logger(log_type, crawler).info("Does not meet the download rules\n")
-                # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+                # duration
+                if 'duration' not in feeds[i]['photo']:
+                    duration = 0
+                else:
+                    duration = int(int(feeds[i]['photo']['duration']) / 1000)
+
+                # video_width / video_height / video_url
+                mapping = {}
+                for item in ['width', 'height']:
+                    try:
+                        val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                    except Exception:
+                        val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                    mapping[item] = val
+                video_width = int(mapping['width']) if mapping['width'] else 0
+                video_height = int(mapping['height']) if mapping['height'] else 0
+                # cover_url
+                if 'coverUrl' not in feeds[i]['photo']:
+                    cover_url = ""
+                else:
+                    cover_url = feeds[i]['photo']['coverUrl']
+
+                # user_name / avatar_url
+                user_name = feeds[i]['author']['name']
+                avatar_url = feeds[i]['author']['headerUrl']
+
+                video_url = feeds[i]['photo']['photoUrl']
+                video_dict = {'video_title': video_title,
+                              'video_id': video_id,
+                              'play_cnt': play_cnt,
+                              'comment_cnt': 0,
+                              'like_cnt': like_cnt,
+                              'share_cnt': 0,
+                              'video_width': video_width,
+                              'video_height': video_height,
+                              'duration': duration,
+                              'publish_time': publish_time,
+                              'publish_time_stamp': publish_time_stamp,
+                              'publish_time_str': publish_time_str,
+                              'user_name': user_name,
+                              'user_id': out_uid,
+                              'avatar_url': avatar_url,
+                              'cover_url': cover_url,
+                              'video_url': video_url,
+                              'session': f"kuaishou{int(time.time())}"}
+
+                rule_1 = cls.download_rule(video_dict, rule_dict_1)
+                Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+                Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
+
+                Common.logger(log_type, crawler).info(
+                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                Common.logger(log_type, crawler).info(
+                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                Common.logger(log_type, crawler).info(
+                    f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                Common.logger(log_type, crawler).info(
+                    f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                Common.logger(log_type, crawler).info(
+                    f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                Common.logger(log_type, crawler).info(
+                    f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
+
+                rule_2 = cls.download_rule(video_dict, rule_dict_2)
+                Common.logger(log_type, crawler).info(
+                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                Common.logger(log_type, crawler).info(
+                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                Common.logger(log_type, crawler).info(
+                    f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                Common.logger(log_type, crawler).info(
+                    f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                Common.logger(log_type, crawler).info(
+                    f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                Common.logger(log_type, crawler).info(
+                    f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+
+                if video_title == "" or video_url == "":
+                    Common.logger(log_type, crawler).info("Invalid video\n")
+                    continue
+                elif rule_1 is True:
+                    # strip the comparison operator from e.g. "<=10" and keep the number
+                    if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy=strategy,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict_1,
+                                             our_uid=our_uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env,
+                                             machine=machine)
+                        # if download_finished is True:
+                        #     download_cnt_1 += 1
+                elif rule_2 is True:
+                    # strip the comparison operator from e.g. "<=10" and keep the number
+                    if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy=strategy,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict_2,
+                                             our_uid=our_uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env,
+                                             machine=machine)
+                        # if download_finished is True:
+                        #     download_cnt_2 += 1
+                else:
+                    Common.logger(log_type, crawler).info("Does not meet the download rules\n")
+                    # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).warning(f"Exception while crawling a single video: {e}\n")
 
             # if pcursor == "no_more":
             #     Common.logger(log_type, crawler).info(f"Author {out_uid} has reached the end, no more content\n")
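
The core change in this hunk: the whole per-video parsing body now sits inside a try/except, so one malformed feed entry is logged and skipped instead of breaking out of (or crashing) the entire page of results. A self-contained sketch of that isolation pattern (toy data; print stands in for Common.logger):

feeds = [{"photo": {"caption": "ok"}}, {"no_photo_key": None}]
for i in range(len(feeds)):
    try:
        # Any KeyError/TypeError from a malformed entry is confined
        # to this iteration instead of aborting the loop.
        title = feeds[i]["photo"]["caption"]
        print(f"parsed video {i}: {title}")
    except Exception as e:
        print(f"Exception while crawling a single video: {e}")
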
@@ -517,127 +515,125 @@ class KuaiShouFollow:
 
     @classmethod
     def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
-        try:
-            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
-            for filter_word in filter_words:
-                if filter_word in video_dict['video_title']:
-                    Common.logger(log_type, crawler).info(f"Title contains a filter word: {video_dict['video_title']}\n")
-                    return
-            download_finished = False
-            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
-                                video_dict['publish_time_str'], env, machine) != 0:
-                Common.logger(log_type, crawler).info('Video already downloaded\n')
-            else:
-                # Download the video
-                Common.download_method(log_type=log_type, crawler=crawler, text='video',
-                                       title=video_dict['video_title'], url=video_dict['video_url'])
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+        for filter_word in filter_words:
+            if filter_word in video_dict['video_title']:
+                Common.logger(log_type, crawler).info(f"Title contains a filter word: {video_dict['video_title']}\n")
+                return
+        download_finished = False
+        if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
+                            video_dict['publish_time_str'], env, machine) != 0:
+            Common.logger(log_type, crawler).info('Video already downloaded\n')
+        else:
+            # Download the video
+            Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                   title=video_dict['video_title'], url=video_dict['video_url'])
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            try:
                 if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                     # Delete the video folder
                     shutil.rmtree(f"./{crawler}/videos/{md_title}")
                     Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                     return
-                # ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                #                             f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
-                #     Common.logger(log_type, crawler).warning(f"Downloaded video is invalid, deleted\n")
-                #     # Delete the video folder
-                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                #     return download_finished
-                # Download the cover
-                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
-                                       title=video_dict['video_title'], url=video_dict['cover_url'])
-                # Save video info to a txt file
-                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-                # Upload the video
-                Common.logger(log_type, crawler).info("Starting video upload...")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          our_uid=our_uid,
-                                                          env=env,
-                                                          oss_endpoint=oss_endpoint)
-                if env == 'dev':
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("Video upload complete")
+            except FileNotFoundError:
+                # Delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("Video file not found, folder deleted\n")
+                return
+
+            # Download the cover
+            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                   title=video_dict['video_title'], url=video_dict['cover_url'])
+            # Save video info to a txt file
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # Upload the video
+            Common.logger(log_type, crawler).info("Starting video upload...")
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            if env == 'dev':
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            else:
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            Common.logger(log_type, crawler).info("Video upload complete")
 
-                if our_video_id is None:
+            if our_video_id is None:
+                try:
                     Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                     # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    return download_finished
+                except FileNotFoundError:
                     return download_finished
 
-                # Save video info to the database
-                insert_sql = f""" insert into crawler_video(video_id,
-                                                        user_id,
-                                                        out_user_id,
-                                                        platform,
-                                                        strategy,
-                                                        out_video_id,
-                                                        video_title,
-                                                        cover_url,
-                                                        video_url,
-                                                        duration,
-                                                        publish_time,
-                                                        play_cnt,
-                                                        crawler_rule,
-                                                        width,
-                                                        height)
-                                                        values({our_video_id},
-                                                        {our_uid},
-                                                        "{video_dict['user_id']}",
-                                                        "{cls.platform}",
-                                                        "定向爬虫策略",
-                                                        "{video_dict['video_id']}",
-                                                        "{video_dict['video_title']}",
-                                                        "{video_dict['cover_url']}",
-                                                        "{video_dict['video_url']}",
-                                                        {int(video_dict['duration'])},
-                                                        "{video_dict['publish_time_str']}",
-                                                        {int(video_dict['play_cnt'])},
-                                                        '{json.dumps(rule_dict)}',
-                                                        {int(video_dict['video_width'])},
-                                                        {int(video_dict['video_height'])}) """
-                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-                Common.logger(log_type, crawler).info('Video info inserted into the database!\n')
-
-                # Write the video row to Feishu
-                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
-                upload_time = int(time.time())
-                values = [[our_video_id,
-                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                           "定向榜",
-                           str(video_dict['video_id']),
-                           video_dict['video_title'],
-                           our_video_link,
-                           video_dict['play_cnt'],
-                           video_dict['comment_cnt'],
-                           video_dict['like_cnt'],
-                           video_dict['share_cnt'],
-                           video_dict['duration'],
-                           f"{video_dict['video_width']}*{video_dict['video_height']}",
-                           video_dict['publish_time_str'],
-                           video_dict['user_name'],
-                           video_dict['user_id'],
-                           video_dict['avatar_url'],
-                           video_dict['cover_url'],
-                           video_dict['video_url']]]
-                time.sleep(1)
-                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
-                Common.logger(log_type, crawler).info(f"Video saved to the cloud doc\n")
-                download_finished = True
-            return download_finished
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+            # Save video info to the database
+            insert_sql = f""" insert into crawler_video(video_id,
+                                                    user_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    {our_uid},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+            Common.logger(log_type, crawler).info('Video info inserted into the database!\n')
+
+            # Write the video row to Feishu
+            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
+            upload_time = int(time.time())
+            values = [[our_video_id,
+                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "定向榜",
+                       str(video_dict['video_id']),
+                       video_dict['video_title'],
+                       our_video_link,
+                       video_dict['play_cnt'],
+                       video_dict['comment_cnt'],
+                       video_dict['like_cnt'],
+                       video_dict['share_cnt'],
+                       video_dict['duration'],
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       video_dict['publish_time_str'],
+                       video_dict['user_name'],
+                       video_dict['user_id'],
+                       video_dict['avatar_url'],
+                       video_dict['cover_url'],
+                       video_dict['video_url']]]
+            time.sleep(1)
+            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
+            Common.logger(log_type, crawler).info(f"Video saved to the cloud doc\n")
+            download_finished = True
+        return download_finished
 
     @classmethod
     def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        # user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
-
         user_list = get_user_from_mysql(log_type, crawler, crawler, env)
         for user in user_list:
             try:
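
In the download_publish hunk above, the blanket try/except around the whole method is removed in favor of targeted FileNotFoundError handling, and cleanup is keyed by md_title (the md5 of the title) instead of the raw title. A standalone sketch of that cleanup pattern (remove_if_invalid is a hypothetical helper; ignore_errors=True is an assumption for brevity, where the commit calls shutil.rmtree directly):

import os
import shutil
from hashlib import md5

def remove_if_invalid(crawler: str, title: str) -> bool:
    # The folder is keyed by the md5 of the title, matching the hunk above.
    md_title = md5(title.encode('utf8')).hexdigest()
    video_path = f"./{crawler}/videos/{md_title}/video.mp4"
    try:
        invalid = os.path.getsize(video_path) == 0
    except FileNotFoundError:
        invalid = True  # a missing file is treated like an empty one
    if invalid:
        shutil.rmtree(f"./{crawler}/videos/{md_title}", ignore_errors=True)
    return invalid
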
@@ -655,7 +651,7 @@ class KuaiShouFollow:
                                   env=env,
                                   machine=machine)
             except Exception as e:
-                continue
+                Common.logger(log_type, crawler).warning(f"Exception while crawling user {user}: {e}\n")
 
 
 if __name__ == "__main__":
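
The last hunk replaces a bare continue with a logged warning, so per-user failures stay visible while the loop keeps running. A toy sketch of the difference (simulated failure; print stands in for the logger):

user_list = ["user_a", "user_b"]
for user in user_list:
    try:
        raise RuntimeError("simulated fetch failure")
    except Exception as e:
        # Previously a bare `continue` silently swallowed the error.
        print(f"Exception while crawling user {user}: {e}")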