wangkun, 2 years ago
parent
commit
2e5c76a70f
8 files changed, with 228 additions and 234 deletions
  1. common/common.py (+3, -34)
  2. common/db.py (+15, -15)
  3. common/demo.py (+16, -0)
  4. common/users.py (+30, -30)
  5. requirements.txt (+3, -3)
  6. youtube/logs/.DS_Store (binary)
  7. youtube/videos/.DS_Store (binary)
  8. youtube/youtube_follow/youtube_follow.py (+161, -152)

+ 3 - 34
common/common.py

@@ -10,9 +10,7 @@ import datetime
 import os
 import time
 import requests
-import json
 import ffmpeg
-from urllib import parse, request
 import urllib3
 proxies = {"http": None, "https": None}
 
@@ -98,7 +96,7 @@ class Common:
                       str(video_dict['like_cnt']) + "\n" +
                       str(video_dict['share_cnt']) + "\n" +
                       f"{video_dict['video_width']}*{video_dict['video_height']}" + "\n" +
-                      str(video_dict['publish_time']) + "\n" +
+                      str(video_dict['publish_time_stamp']) + "\n" +
                       str(video_dict['user_name']) + "\n" +
                       str(video_dict['avatar_url']) + "\n" +
                       str(video_dict['video_url']) + "\n" +
@@ -176,35 +174,6 @@ class Common:
             except Exception as e:
                 cls.logger(log_type, crawler).error(f"封面下载失败:{e}\n")
 
-    # Youdao translation: English → Chinese
-    @classmethod
-    def fanyi(cls, query):
-        req_url = 'http://fanyi.youdao.com/translate'  # translation API endpoint
-        # build the form data to submit
-        Form_Date = {'i': query,
-                     'doctype': 'json',
-                     'form': 'AUTO',
-                     'to': 'AUTO',
-                     # 'to': 'Chinese',
-                     'smartresult': 'dict',
-                     'client': 'fanyideskweb',
-                     'salt': '1526995097962',
-                     'sign': '8e4c4765b52229e1f3ad2e633af89c76',
-                     'version': '2.1',
-                     'keyform': 'fanyi.web',
-                     'action': 'FY_BY_REALTIME',
-                     'typoResult': 'false'}
-
-        data = parse.urlencode(Form_Date).encode('utf-8')  # encode the submitted data
-        response = request.urlopen(req_url, data)  # submit the request and get the response
-        html = response.read().decode('utf-8')  # read the server's reply
-        # print(html)
-        # the reply (html) is JSON-formatted
-        translate_results = json.loads(html)  # load it as JSON
-        translate_results = translate_results['translateResult'][0][0]['tgt']  # pick the translated text out of the JSON
-        # print(translate_results)  # print the result
-        return translate_results  # return the result
-
     @classmethod
     def ffmpeg(cls, log_type, crawler, video_path):
         probe = ffmpeg.probe(video_path)
@@ -227,6 +196,6 @@ class Common:
 
 
 if __name__ == "__main__":
-    res = Common.fanyi("10 MOST UNIQUE Dance Groups EVER On Britain's Got Talent!")
-    print(res)
+
+    pass
 

+ 15 - 15
common/db.py

@@ -48,25 +48,25 @@ class MysqlHelper:
 
     @classmethod
     def get_values(cls, log_type, crawler, sql, env, machine):
-        # try:
-        # connect to the database
-        connect = cls.connect_mysql(env, machine)
-        # get a Cursor object
-        mysql = connect.cursor()
+        try:
+            # connect to the database
+            connect = cls.connect_mysql(env, machine)
+            # get a Cursor object
+            mysql = connect.cursor()
 
-        # execute the SQL statement
-        mysql.execute(sql)
+            # execute the SQL statement
+            mysql.execute(sql)
 
-        # fetchall returns a tuple; each element is itself a tuple representing one row
-        data = mysql.fetchall()
+            # fetchall returns a tuple; each element is itself a tuple representing one row
+            data = mysql.fetchall()
 
-        # close the database connection
-        connect.close()
+            # close the database connection
+            connect.close()
 
-        # return the query result (a tuple)
-        return data
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_values异常:{e}\n")
+            # return the query result (a tuple)
+            return data
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_values异常:{e}\n")
 
     @classmethod
     def update_values(cls, log_type, crawler, sql, env, machine):
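With the exception handling added above, get_values logs and returns None when a query fails, while a successful query with no matches returns an empty tuple, so callers need to treat both as "no rows". A minimal usage sketch, assuming log_type, crawler, sql, env and machine are already defined as in the existing callers:

    rows = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
    if rows is None or len(rows) == 0:
        # None: the query raised and was logged; (): the query matched nothing
        Common.logger(log_type, crawler).info('no rows returned\n')
    else:
        for row in rows:
            # each row is a tuple, exactly as returned by fetchall()
            print(row)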

+ 16 - 0
common/demo.py

@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/10
+import time
+
+
+class Demo:
+    @classmethod
+    def test_time(cls):
+        time_str = '2023-02-07'
+        time_stamp = int(time.mktime(time.strptime(time_str, "%Y-%m-%d")))
+        print(time_stamp)
+
+
+if __name__ == "__main__":
+    Demo.test_time()
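demo.py exercises the date-string to Unix-timestamp conversion that the youtube_follow.py changes below depend on. A small sketch of the same pattern, extended to the three cases handled there (empty string, full datetime, bare date); the helper name to_timestamp is illustrative and not part of the repository:

    import time

    def to_timestamp(publish_time):
        # '' -> 0; '2023-02-07 12:30:00' -> datetime format; '2023-02-07' -> date format
        if publish_time == '':
            return 0
        fmt = "%Y-%m-%d %H:%M:%S" if ':' in publish_time else "%Y-%m-%d"
        return int(time.mktime(time.strptime(publish_time, fmt)))

    print(to_timestamp('2023-02-07'))  # timestamp of 2023-02-07 00:00 local time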

+ 30 - 30
common/users.py

@@ -23,36 +23,36 @@ class Users:
         :param env: environment
         :return: in-site UID
         """
-        # try:
-        if env == 'dev':
-            # external URL
-            url = 'https://videotest.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-            # internal URL
-            # url = 'http://videotest-internal.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-        elif env == 'prod':
-            # external URL
-            url = 'https://longvideoapi.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-            # internal URL
-            # url = 'http://longvideoapi-internal.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-        else:
-            # external URL
-            url = 'https://longvideoapi.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-            # internal URL
-            # url = 'http://longvideoapi-internal.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
-        params = {
-            # 'count': 1,     # (required) number of accounts: pass 1
-            # 'accountType': 4,   # (required) account type: pass 4 for an app virtual account
-            'pwd': '',  # password, default 12346
-            'nickName': user_dict['nickName'],  # nickname, default vuser......
-            'avatarUrl': user_dict['avatarUrl'],  # avatar URL, default http://weapppiccdn.yishihui.com/resources/images/pic_normal.png
-            'tagName': user_dict['tagName'],  # separate multiple values with commas
-        }
-        response = requests.post(url=url, params=params)
-        # print(response.text)
-        user_id = response.json()['data']
-        return user_id
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"create_user异常:{e}\n")
+        try:
+            if env == 'dev':
+                # external URL
+                url = 'https://videotest.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+                # internal URL
+                # url = 'http://videotest-internal.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+            elif env == 'prod':
+                # external URL
+                url = 'https://longvideoapi.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+                # internal URL
+                # url = 'http://longvideoapi-internal.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+            else:
+                # external URL
+                url = 'https://longvideoapi.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+                # internal URL
+                # url = 'http://longvideoapi-internal.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
+            params = {
+                # 'count': 1,     # (required) number of accounts: pass 1
+                # 'accountType': 4,   # (required) account type: pass 4 for an app virtual account
+                'pwd': '',  # password, default 12346
+                'nickName': user_dict['nickName'],  # nickname, default vuser......
+                'avatarUrl': user_dict['avatarUrl'],  # avatar URL, default http://weapppiccdn.yishihui.com/resources/images/pic_normal.png
+                'tagName': user_dict['tagName'],  # separate multiple values with commas
+            }
+            response = requests.post(url=url, params=params)
+            # print(response.text)
+            user_id = response.json()['data']
+            return user_id
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"create_user异常:{e}\n")
 
 
 if __name__ == "__main__":
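create_user now logs and returns None when registration fails, so the returned in-site UID should be checked before it is used. A minimal usage sketch; the argument order (log_type, crawler, user_dict, env) and the sample values are assumptions for illustration, not taken from this hunk:

    user_dict = {
        'nickName': 'vuser_demo',        # placeholder nickname
        'avatarUrl': 'http://weapppiccdn.yishihui.com/resources/images/pic_normal.png',
        'tagName': 'youtube,follow',     # multiple tags separated by commas
    }
    our_uid = Users.create_user('follow', 'youtube', user_dict, 'dev')  # assumed signature
    if our_uid is None:
        print('registration failed, see the crawler log')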

+ 3 - 3
requirements.txt

@@ -1,9 +1,9 @@
+Appium_Python_Client==2.8.1
 ffmpeg==1.4
 loguru==0.6.0
 oss2==2.15.0
+psutil==5.9.2
 PyMySQL==1.0.2
 requests==2.27.1
-selenium~=4.2.0
+selenium==4.8.0
 urllib3==1.26.9
-Appium-Python-Client~=2.7.1
-psutil~=5.9.2

binary
youtube/logs/.DS_Store


binary
youtube/videos/.DS_Store


+ 161 - 152
youtube/youtube_follow/youtube_follow.py

@@ -676,31 +676,30 @@ class Follow:
 
     @classmethod
     def get_videos(cls, log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine):
-        # try:
-        while True:
-            feeds = cls.get_feeds(log_type, crawler, browse_id, out_uid)
-            # Common.logger(log_type, crawler).info(f"feeds:{feeds}\n")
-            for i in range(len(feeds)):
-                if 'richItemRenderer' not in feeds[i]:
-                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]}\n')
-                elif 'content' not in feeds[i]['richItemRenderer']:
-                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]}\n')
-                elif 'videoRenderer' not in feeds[i]['richItemRenderer']['content']:
-                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]}\n')
-                elif 'videoId' not in feeds[i]["richItemRenderer"]["content"]['videoRenderer']:
-                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]["videoRenderer"]}\n')
-                else:
-                    video_id = feeds[i]["richItemRenderer"]["content"]['videoRenderer']['videoId']
-                    video_dict = cls.get_video_info(log_type, crawler, out_uid, video_id, machine)
-                    # published within the last 30 days
-                    publish_time = int(time.mktime(time.strptime(video_dict['publish_time'], "%Y-%m-%d")))
-                    if int(time.time()) - publish_time <= 3600*24*30:
-                        cls.download_publish(log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine)
+        try:
+            while True:
+                feeds = cls.get_feeds(log_type, crawler, browse_id, out_uid)
+                for i in range(len(feeds)):
+                    if 'richItemRenderer' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]}\n')
+                    elif 'content' not in feeds[i]['richItemRenderer']:
+                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]}\n')
+                    elif 'videoRenderer' not in feeds[i]['richItemRenderer']['content']:
+                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]}\n')
+                    elif 'videoId' not in feeds[i]["richItemRenderer"]["content"]['videoRenderer']:
+                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]["videoRenderer"]}\n')
                     else:
-                        Common.logger(log_type, crawler).info('发布时间超过30天\n')
-                        return
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
+                        video_id = feeds[i]["richItemRenderer"]["content"]['videoRenderer']['videoId']
+                        video_dict = cls.get_video_info(log_type, crawler, out_uid, video_id, machine)
+                        # published within the last 30 days
+                        publish_time = int(time.mktime(time.strptime(video_dict['publish_time'], "%Y-%m-%d")))
+                        if int(time.time()) - publish_time <= 3600*24*30:
+                            cls.download_publish(log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine)
+                        else:
+                            Common.logger(log_type, crawler).info('发布时间超过30天\n')
+                            return
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
 
     @classmethod
     def get_video_info(cls, log_type, crawler, out_uid, video_id, machine):
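The elif chain above checks each nesting level of a feed item before reading videoId, logging which level was missing. Where the per-level warnings are not needed, the same guard can be written with chained dict.get calls; a compact sketch (the helper name feed_video_id is illustrative):

    def feed_video_id(feed_item):
        # Walk richItemRenderer -> content -> videoRenderer -> videoId,
        # returning None if any level is absent.
        renderer = (feed_item.get('richItemRenderer', {})
                             .get('content', {})
                             .get('videoRenderer', {}))
        return renderer.get('videoId')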
@@ -881,7 +880,6 @@ class Follow:
                 'x-youtube-client-version': '2.20230201.01.00'
             }
             response = requests.post(url=url, headers=headers, data=payload)
-            # Common.logger(log_type, crawler).info(f"get_video_info_response:{response.json()}\n")
             if response.status_code != 200:
                 Common.logger(log_type, crawler).warning(f"get_video_info_response:{response.text}\n")
             elif 'streamingData' not in response.json():
@@ -920,6 +918,14 @@ class Follow:
                 else:
                     publish_time = playerMicroformatRenderer['publishDate']
 
+                if publish_time == '':
+                    publish_time_stamp = 0
+                elif ':' in publish_time:
+                    publish_time_stamp = int(time.mktime(time.strptime(publish_time, "%Y-%m-%d %H:%M:%S")))
+                else:
+                    publish_time_stamp = int(time.mktime(time.strptime(publish_time, "%Y-%m-%d")))
+
+
                 # user_name
                 if 'author' not in videoDetails:
                     user_name = ''
@@ -962,6 +968,7 @@ class Follow:
                     'duration': duration,
                     'play_cnt': play_cnt,
                     'publish_time': publish_time,
+                    'publish_time_stamp': publish_time_stamp,
                     'user_name': user_name,
                     'out_uid': out_uid,
                     'cover_url': cover_url,
@@ -973,141 +980,143 @@ class Follow:
 
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine):
-        # try:
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_dict['video_id']}" """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
-        if video_dict['video_title'] == '' or  video_dict['video_url'] == '':
-            Common.logger(log_type, crawler).info('无效视频\n')
-        elif video_dict['duration'] > 600 or video_dict['duration'] < 60:
-            Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足规则\n")
-        elif repeat_video is not None and len(repeat_video) != 0:
-            Common.logger(log_type, crawler).info('视频已下载\n')
-        else:
-            # download the video
-            Common.logger(log_type, crawler).info('开始下载视频...')
-            Common.download_method(log_type, crawler, 'video', video_dict['video_title'], video_dict['video_url'])
-            ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-            video_width = int(ffmpeg_dict['width'])
-            video_height = int(ffmpeg_dict['height'])
-            duration = int(ffmpeg_dict['duration'])
-            video_size = int(ffmpeg_dict['size'])
-
-            Common.logger(log_type, crawler).info(f'video_width:{video_width}')
-            Common.logger(log_type, crawler).info(f'video_height:{video_height}')
-            Common.logger(log_type, crawler).info(f'duration:{duration}')
-            Common.logger(log_type, crawler).info(f'video_size:{video_size}\n')
-
-            video_dict['video_width'] = video_width
-            video_dict['video_height'] = video_height
-            video_dict['duration'] = duration
-            video_dict['comment_cnt'] = 0
-            video_dict['like_cnt'] = 0
-            video_dict['share_cnt'] = 0
-            video_dict['avatar_url'] = video_dict['cover_url']
-            video_dict['session'] = f'youtube{int(time.time())}'
-            rule='1,2'
-            if duration < 60 or duration > 600:
-                # delete the video folder
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
-                Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足抓取规则,删除成功\n")
-                return
-            elif video_size == 0 or duration == 0 or video_size is None or duration is None:
-                # delete the video folder
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
-                Common.logger(log_type, crawler).info(f"视频下载出错,删除成功\n")
-                return
+        try:
+            sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_dict['video_id']}" """
+            repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+            if video_dict['video_title'] == '' or  video_dict['video_url'] == '':
+                Common.logger(log_type, crawler).info('无效视频\n')
+            elif video_dict['duration'] > 600 or video_dict['duration'] < 60:
+                Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足规则\n")
+            elif repeat_video is not None and len(repeat_video) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, 'GVxlYk') for x in y]:
+                Common.logger(log_type, crawler).info('视频已下载\n')
             else:
-                # download the cover image
-                Common.download_method(log_type, crawler, 'cover', video_dict['video_title'], video_dict['cover_url'])
-                # save the video info text file
-                Common.save_video_info(log_type, crawler, video_dict)
+                # download the video
+                Common.logger(log_type, crawler).info('开始下载视频...')
+                Common.download_method(log_type, crawler, 'video', video_dict['video_title'], video_dict['video_url'])
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                video_width = int(ffmpeg_dict['width'])
+                video_height = int(ffmpeg_dict['height'])
+                duration = int(ffmpeg_dict['duration'])
+                video_size = int(ffmpeg_dict['size'])
+
+                Common.logger(log_type, crawler).info(f'video_width:{video_width}')
+                Common.logger(log_type, crawler).info(f'video_height:{video_height}')
+                Common.logger(log_type, crawler).info(f'duration:{duration}')
+                Common.logger(log_type, crawler).info(f'video_size:{video_size}\n')
 
-                # upload the video
-                Common.logger(log_type, crawler).info(f"开始上传视频")
-                if env == 'dev':
-                    our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                video_dict['video_width'] = video_width
+                video_dict['video_height'] = video_height
+                video_dict['duration'] = duration
+                video_dict['comment_cnt'] = 0
+                video_dict['like_cnt'] = 0
+                video_dict['share_cnt'] = 0
+                video_dict['avatar_url'] = video_dict['cover_url']
+                video_dict['session'] = f'youtube{int(time.time())}'
+                rule='1,2'
+                if duration < 60 or duration > 600:
+                    # delete the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
+                    Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足抓取规则,删除成功\n")
+                    return
+                elif video_size == 0 or duration == 0 or video_size is None or duration is None:
+                    # delete the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
+                    Common.logger(log_type, crawler).info(f"视频下载出错,删除成功\n")
+                    return
                 else:
-                    our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成")
+                    # download the cover image
+                    Common.download_method(log_type, crawler, 'cover', video_dict['video_title'], video_dict['cover_url'])
+                    # save the video info text file
+                    Common.save_video_info(log_type, crawler, video_dict)
+
+                    # upload the video
+                    Common.logger(log_type, crawler).info(f"开始上传视频")
+                    if env == 'dev':
+                        our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
+                        our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                    else:
+                        our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
+                        our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                    Common.logger(log_type, crawler).info("视频上传完成")
 
-                # save video info to Feishu
-                Feishu.insert_columns(log_type, crawler, "GVxlYk", "ROWS", 1, 2)
-                # video ID sheet: write data into the first row
-                upload_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                values = [[upload_time,
-                           "定向榜",
-                           video_dict['video_id'],
-                           video_dict['video_title'],
-                           our_video_link,
-                           video_dict['play_cnt'],
-                           video_dict['duration'],
-                           f'{video_width}*{video_height}',
-                           video_dict['publish_time'],
-                           video_dict['user_name'],
-                           video_dict['cover_url'],
-                           video_dict['video_url']
-                           ]]
-                time.sleep(1)
-                Feishu.update_values(log_type, crawler, "GVxlYk", "F2:Z2", values)
-                Common.logger(log_type, crawler).info('视频信息写入定向_已下载表成功\n')
+                    # save video info to Feishu
+                    Feishu.insert_columns(log_type, crawler, "GVxlYk", "ROWS", 1, 2)
+                    # video ID sheet: write data into the first row
+                    upload_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
+                    values = [[upload_time,
+                               "定向榜",
+                               video_dict['video_id'],
+                               video_dict['video_title'],
+                               our_video_link,
+                               video_dict['play_cnt'],
+                               video_dict['duration'],
+                               f'{video_width}*{video_height}',
+                               video_dict['publish_time'],
+                               video_dict['user_name'],
+                               video_dict['cover_url'],
+                               video_dict['video_url']
+                               ]]
+                    time.sleep(1)
+                    Feishu.update_values(log_type, crawler, "GVxlYk", "F2:Z2", values)
+                    Common.logger(log_type, crawler).info('视频信息写入定向_已下载表成功\n')
 
-                # save video info to the database
-                sql = f""" insert into crawler_video(video_id, 
-                user_id, 
-                out_user_id, 
-                platform, 
-                strategy, 
-                out_video_id, 
-                video_title, 
-                cover_url, 
-                video_url, 
-                duration, 
-                publish_time, 
-                play_cnt, 
-                crawler_rule, 
-                width, 
-                height) 
-                values({our_video_id}, 
-                "{our_uid}", 
-                "{video_dict['out_uid']}", 
-                "{cls.platform}", 
-                "定向爬虫策略", 
-                "{video_dict['video_id']}", 
-                "{video_dict['video_title']}", 
-                "{video_dict['cover_url']}",
-                "{video_dict['video_url']}",
-                {int(duration)},
-                "{video_dict['publish_time']}",
-                {int(video_dict['play_cnt'])},
-                "{rule}",
-                {int(video_width)},
-                {int(video_height)}) """
-                MysqlHelper.update_values(log_type, crawler, sql, env, machine)
-                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f"download_publish异常:{e}\n")
+                    # save video info to the database
+                    sql = f""" insert into crawler_video(video_id, 
+                    user_id, 
+                    out_user_id, 
+                    platform, 
+                    strategy, 
+                    out_video_id, 
+                    video_title, 
+                    cover_url, 
+                    video_url, 
+                    duration, 
+                    publish_time, 
+                    play_cnt, 
+                    crawler_rule, 
+                    width, 
+                    height) 
+                    values({our_video_id}, 
+                    "{our_uid}", 
+                    "{video_dict['out_uid']}", 
+                    "{cls.platform}", 
+                    "定向爬虫策略", 
+                    "{video_dict['video_id']}", 
+                    "{video_dict['video_title']}", 
+                    "{video_dict['cover_url']}",
+                    "{video_dict['video_url']}",
+                    {int(duration)},
+                    "{video_dict['publish_time']}",
+                    {int(video_dict['play_cnt'])},
+                    "{rule}",
+                    {int(video_width)},
+                    {int(video_height)}) """
+                    MysqlHelper.update_values(log_type, crawler, sql, env, machine)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"download_publish异常:{e}\n")
 
     @classmethod
     def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        # try:
-        user_list = cls.get_user_from_feishu(log_type, crawler, 'c467d7', env, machine)
-        if len(user_list) == 0:
-            Common.logger(log_type, crawler).warning('用户列表为空\n')
-        else:
-            for user_dict in user_list:
-                out_uid = user_dict['out_user_id']
-                user_name = user_dict['out_user_name']
-                browse_id = user_dict['out_browse_id']
-                our_uid = user_dict['our_user_id']
-                Common.logger(log_type, crawler).info(f'获取 {user_name} 主页视频\n')
-                cls.get_videos(log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine)
-                Common.logger(log_type, crawler).info('休眠 10 秒')
-                time.sleep(10)
-                cls.continuation = ''
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_follow_videos异常:{e}\n")
+        try:
+            user_list = cls.get_user_from_feishu(log_type, crawler, 'c467d7', env, machine)
+            if len(user_list) == 0:
+                Common.logger(log_type, crawler).warning('用户列表为空\n')
+            else:
+                for user_dict in user_list:
+                    out_uid = user_dict['out_user_id']
+                    user_name = user_dict['out_user_name']
+                    browse_id = user_dict['out_browse_id']
+                    our_uid = user_dict['our_user_id']
+                    Common.logger(log_type, crawler).info(f'获取 {user_name} 主页视频\n')
+                    cls.get_videos(log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine)
+                    Common.logger(log_type, crawler).info('休眠 10 秒')
+                    time.sleep(10)
+                    cls.continuation = ''
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_follow_videos异常:{e}\n")
 
 
 if __name__ == "__main__":
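The insert statement above builds SQL by interpolating video fields straight into an f-string, which breaks as soon as a title contains a double quote. Since PyMySQL is already pinned in requirements.txt, the same insert can pass the values as parameters and let the driver handle the quoting. A sketch only, assuming direct use of MysqlHelper.connect_mysql rather than the existing update_values helper:

    connect = MysqlHelper.connect_mysql(env, machine)
    cursor = connect.cursor()
    insert_sql = """insert into crawler_video(video_id, user_id, out_user_id, platform, strategy,
                    out_video_id, video_title, cover_url, video_url, duration, publish_time,
                    play_cnt, crawler_rule, width, height)
                    values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    cursor.execute(insert_sql, (
        our_video_id, our_uid, video_dict['out_uid'], Follow.platform, "定向爬虫策略",
        video_dict['video_id'], video_dict['video_title'], video_dict['cover_url'],
        video_dict['video_url'], int(duration), video_dict['publish_time'],
        int(video_dict['play_cnt']), rule, int(video_width), int(video_height)))
    connect.commit()  # commit so the row is persisted
    connect.close()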