wangkun 2 years ago
parent
commit
7d679abaad

+ 1 - 1
common/common.py

@@ -213,7 +213,7 @@ class Common:
             Common.logger(log_type, crawler).info('No video Stream found!')
             return
         format1 = probe['format']
-        size = int(format1['size']) / 1024 / 1024
+        size = int(int(format1['size']) / 1024 / 1024)
         width = int(video_stream['width'])
         height = int(video_stream['height'])
         duration = int(float(video_stream['duration']))
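Note: the net effect of this hunk, assuming probe is the ffprobe result dict used above, is that the extra int() truncates the megabyte value, so downstream consumers that compare size against integers (e.g. the video_size == 0 check in youtube_follow.py below) receive an int rather than a float. A minimal sketch of the changed computation:

    # sketch of the changed line; ffprobe reports format['size'] as a string of bytes
    size_bytes = int(probe['format']['size'])
    size = int(size_bytes / 1024 / 1024)  # truncate to whole megabytes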

+ 3 - 3
common/publish.py

@@ -64,11 +64,11 @@ class Publish:
         versionCode  version, default 1
         :return:
         """
-        # Common.logger(log_type, crawler).info(f'publish request data: {request_data}')
+        Common.logger(log_type, crawler).info(f'publish request data: {request_data}')
         result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
-        # Common.logger(log_type, crawler).info(f'publish result: {result}')
+        Common.logger(log_type, crawler).info(f'publish result: {result}')
         video_id = result["data"]["id"]
-        # Common.logger(log_type, crawler).info(f'video_id: {video_id}')
+        Common.logger(log_type, crawler).info(f'video_id: {video_id}')
         if result['code'] != 0:
             Common.logger(log_type, crawler).error('publish failure msg = {}'.format(result['msg']))
         else:
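Note: the re-enabled logging makes an ordering problem visible: result["data"]["id"] is read before the result['code'] != 0 check, so a failed publish raises KeyError before the error branch can log anything. A minimal reordering sketch, assuming the same response shape:

    # sketch: check the response code before touching result["data"]
    result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
    Common.logger(log_type, crawler).info(f'publish result: {result}')
    if result['code'] != 0:
        Common.logger(log_type, crawler).error('publish failure msg = {}'.format(result['msg']))
        return None
    video_id = result["data"]["id"]
    Common.logger(log_type, crawler).info(f'video_id: {video_id}')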

+ 1 - 1
main/main.sh

@@ -1,6 +1,6 @@
 #!/bin/bash
 # Kanyikan+ mini-program, Moments ranking list
-# sh ./main/main.sh ./kanyikan/kanyikan_main/run_kanyikan_moment.py --log_type="moment" --crawler="kanyikan" --strategy="kanyikan_moment" --our_uid="kanyikan_moment" --oss_endpoint="out" --env="dev" ./kanyikan/nohup.log local
+# sh ./weixinzhishu_main/weixinzhishu_main.sh ./kanyikan/kanyikan_main/run_kanyikan_moment.py --log_type="moment" --crawler="kanyikan" --strategy="kanyikan_moment" --our_uid="kanyikan_moment" --oss_endpoint="out" --env="dev" ./kanyikan/nohup.log local
 
 crawler_dir=$1  # crawler execution path, e.g.: ./youtube/youtube_main/run_youtube_follow.py
 log_type=$2     # log naming format, e.g.: follow generates 2023-02-08-follow.log under youtube/logs/

+ 3 - 1
requirements.txt

@@ -3,5 +3,7 @@ loguru==0.6.0
 oss2==2.15.0
 PyMySQL==1.0.2
 requests==2.27.1
-selenium==4.8.0
+selenium~=4.2.0
 urllib3==1.26.9
+Appium-Python-Client~=2.7.1
+psutil~=5.9.2
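Note: the selenium pin is relaxed from ==4.8.0 to ~=4.2.0, presumably to stay inside the version range that Appium-Python-Client 2.7.1 accepts, and psutil backs the new process-killing logic in get_search_key.py below.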

+ 0 - 0
weixinzhishu/main/__init__.py → weixinzhishu/weixinzhishu_main/__init__.py


+ 169 - 0
weixinzhishu/weixinzhishu_main/get_search_key.py

@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/10
+import json
+import os
+import sys
+import time
+import psutil as psutil
+from appium import webdriver
+from selenium.webdriver.common.by import By
+
+from common.common import Common
+from common.feishu import Feishu  # used below but missing from this file as committed; module path assumed
+
+sys.path.append(os.getcwd())
+
+
+class ShipinhaoWindows:
+    @classmethod
+    def kill_pid(cls, log_type):
+        try:
+            os.system('chcp 65001')  # switch cmd's code page from the default GBK to UTF-8
+            list_process = list()
+            pid_list = psutil.pids()
+            for sub_pid in pid_list:
+                try:
+                    process_info = psutil.Process(sub_pid)
+                    if process_info.name() == 'WechatBrowser.exe' or process_info.name() == 'WeChatPlayer.exe':
+                        list_process.append(sub_pid)
+                except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+                    pass
+            for pid in list_process:
+                os.system('taskkill /f /pid ' + str(pid))
+        except Exception as e:
+            Common.logger(log_type).error('kill_pid异常:{}', e)
+
+    @classmethod
+    def click_video(cls, log_type, crawler):
+        try:
+            Common.logger(log_type, crawler).info('启动"微信"')
+            desired_caps = {'app': r"C:\Program Files (x86)\Tencent\WeChat\WeChat.exe"}
+            driver = webdriver.Remote(
+                command_executor='http://127.0.0.1:4723',
+                desired_capabilities=desired_caps)
+            driver.implicitly_wait(10)
+
+            # Common.logger(log_type).info('点击"聊天窗口"')
+            # driver.find_element(By.NAME, '聊天').click()
+            #
+            # Common.logger(log_type).info('点击"爬虫群"')
+            # driver.find_elements(By.NAME, '爬虫群')[0].click()
+
+            Common.logger(log_type, crawler).info('点击微信指数')
+            driver.find_elements(By.NAME, '消息')[-1].click()
+
+            Common.logger(log_type, crawler).info('休眠 10 秒,退出微信指数')
+            time.sleep(10)
+            cls.kill_pid(log_type)
+
+            Common.logger(log_type, crawler).info('退出微信')
+            driver.quit()
+        except Exception as e:
+            Common.logger(log_type, crawler).error('click_video异常:{}', e)
+
+    @classmethod
+    def get_url(cls, log_type):
+        try:
+            # directory where Charles capture files are saved
+            charles_file_dir = r"./chlsfiles/"
+
+            if len(os.listdir(charles_file_dir)) == 0:
+                Common.logger(log_type).info("未找到chlsfile文件,等待2s")
+                time.sleep(2)
+            else:
+                # all files under the target directory
+                all_file = sorted(os.listdir(charles_file_dir))
+
+                # pick the target (most recent) file
+                old_file = all_file[-1]
+
+                # split the file name from its extension
+                new_file = os.path.splitext(old_file)
+
+                # rename the file extension to .txt
+                os.rename(os.path.join(charles_file_dir, old_file),
+                          os.path.join(charles_file_dir, new_file[0] + ".txt"))
+
+                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
+                    contents = json.load(f, strict=False)
+
+                video_url_list = []
+                cover_url_list = []
+
+                if "finder.video.qq.com" in [text['host'] for text in contents]:
+                    for text in contents:
+                        if text["host"] == "finder.video.qq.com" and text["path"] == "/251/20302/stodownload":
+                            video_url_list.append(text)
+                        elif text["host"] == "finder.video.qq.com" and text["path"] == "/251/20304/stodownload":
+                            cover_url_list.append(text)
+
+                    video_url = video_url_list[0]['host']+video_url_list[0]['path']+'?'+video_url_list[0]['query']
+                    cover_url = cover_url_list[0]['host']+cover_url_list[0]['path']+'?'+cover_url_list[0]['query']
+                    head_url = cover_url
+
+                    # print(f'video_url:{video_url}')
+                    # print(f'cover_url:{cover_url}')
+                    # print(f'head_url:{head_url}')
+
+                    return video_url, cover_url, head_url
+                else:
+                    Common.logger(log_type).info("未找到url")
+                    return '未找到url'
+
+        except Exception as e:
+            Common.logger(log_type).exception("get_url异常:{}\n", e)
+            return None
+
+    @classmethod
+    def write_url(cls, log_type):
+        try:
+            while True:
+                if Feishu.get_values_batch(log_type, 'shipinhao', 'FSDlBy')[1][11] is None:
+                    Common.del_charles_files('recommend')
+                    cls.click_video(log_type, 'weixinzhishu')  # click_video expects (log_type, crawler); crawler value assumed
+                    Common.logger(log_type).info('等待 2s')
+                    time.sleep(2)
+                    Common.logger(log_type).info('获取视频头像/封面/播放地址')
+                    urls = cls.get_url(log_type)
+                    if urls == '未找到url':
+                        time.sleep(1)
+                        cls.write_url(log_type)
+                    elif urls is None:
+                        time.sleep(1)
+                        cls.write_url(log_type)
+                    else:
+                        Feishu.update_values(log_type, 'shipinhao', 'FSDlBy', 'J2:L2',
+                                             [['https://'+urls[2], 'https://'+urls[1], 'https://'+urls[0]]])
+                        Common.logger(log_type).info('视频地址信息写入飞书成功\n')
+                        Common.del_charles_files('recommend')
+                        break
+                else:
+                    Common.logger(log_type).info('视频已有地址信息,休眠 10s')
+                    time.sleep(10)
+                    break
+        except Exception as e:
+            # Feishu.dimension_range(log_type, 'shipinhao', 'FSDlBy', 'ROWS', 2, 2)
+            Common.logger(log_type).error('write_url异常:{}\n', e)
+
+    @classmethod
+    def run_get_url(cls, log_type):
+        try:
+            while True:
+                if len(Feishu.get_values_batch(log_type, 'shipinhao', 'FSDlBy')) == 1:
+                    Common.logger(log_type).info('暂无需要获取地址的视频信息')
+                    time.sleep(30)
+                    break
+                else:
+                    cls.write_url(log_type)
+
+        except Exception as e:
+            Common.logger(log_type).error('run_get_url异常:{}\n', e)
+
+
+if __name__ == '__main__':
+    while True:
+        ShipinhaoWindows.run_get_url('recommend')
+        Common.del_logs('recommend')
+        time.sleep(1)
+
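Note: two patterns in get_search_key.py worth flagging: Common.logger is called with a single argument in kill_pid/get_url/write_url but with (log_type, crawler) everywhere else in the repo, and write_url retries by calling itself recursively, so a long run of empty Charles captures grows the call stack. An iterative sketch of the same retry, assuming the helpers above:

    # sketch: loop instead of recursing while no URL has been captured yet
    while True:
        urls = cls.get_url(log_type)
        if urls is None or urls == '未找到url':
            time.sleep(1)
            continue
        Feishu.update_values(log_type, 'shipinhao', 'FSDlBy', 'J2:L2',
                             [['https://' + urls[2], 'https://' + urls[1], 'https://' + urls[0]]])
        break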

+ 27 - 0
weixinzhishu/weixinzhishu_main/search_key.py

@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/10
+from appium import webdriver
+from selenium.webdriver.common.by import By
+
+
+class Searchkey:
+    @classmethod
+    def start_weixinzhishu(cls):
+        desired_caps = {'app': 'Root', 'deviceName': 'windowsPC', 'platformName': 'Windows'}
+        driver = webdriver.Remote(
+            command_executor='http://127.0.0.1:4723',
+            desired_capabilities=desired_caps)
+
+        main_win = driver.find_element(By.NAME, '微信指数')
+        print(main_win)
+        hd = hex(int(main_win.get_attribute("NativeWindowHandle")))
+        print(hd)
+        caps = {'appTopLevelWindow': str(hd)}
+        driver = webdriver.Remote(
+            command_executor='http://127.0.0.1:4723',
+            desired_capabilities=caps)
+
+
+if __name__ == "__main__":
+    Searchkey.start_weixinzhishu()
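Note: this is the usual WinAppDriver attach pattern: open a 'Root' desktop session, find the top-level '微信指数' window, read its NativeWindowHandle, then bind a second session to that handle via appTopLevelWindow. Two nits: hex() already returns a string, so the str(hd) wrapper is redundant, and the second webdriver.Remote rebinds driver without quitting the Root session.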

+ 33 - 0
weixinzhishu/weixinzhishu_main/weixinzhishu.py

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/10
+import requests
+import json
+
+
+class Weixinzhishu:
+    @classmethod
+    def weixinzhishu(cls, log_type, crawler, query):
+        url = "https://search.weixin.qq.com/cgi-bin/wxaweb/wxindex"
+        payload = json.dumps({
+            "openid": "ov4ns0OAM_om-YOT7idMCe5gxoeQ",
+            "search_key": "1676007537546259_3170762354",
+            "cgi_name": "GetDefaultIndex",
+            "start_ymd": "20230206",
+            "end_ymd": "20230210",
+            "query": query
+        })
+        headers = {
+            'Host': 'search.weixin.qq.com',
+            'content-type': 'application/json',
+            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.32(0x1800202a) NetType/WIFI Language/zh_CN',
+            'Referer': 'https://servicewechat.com/wxc026e7662ec26a3a/42/page-frame.html'
+        }
+        response = requests.request("POST", url, headers=headers, data=payload)
+        print(response.text)
+        time_index = response.json()['content']['resp_list'][0]['indexes'][0]['time_indexes']
+        print(time_index)
+
+
+if __name__ == "__main__":
+    Weixinzhishu.weixinzhishu('weixin', 'weixinzhishu', '狂飙')
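Note: openid and search_key in the payload are hard-coded captures and expire; once they do, the response carries no 'content' key and the chained lookup raises. A guarded sketch, assuming the success shape printed above:

    # sketch: fail soft when the hard-coded search_key has expired
    data = response.json()
    resp_list = data.get('content', {}).get('resp_list', [])
    if not resp_list:
        print(f"unexpected response, search_key may have expired: {data}")
    else:
        time_index = resp_list[0]['indexes'][0]['time_indexes']
        print(time_index)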

+ 151 - 151
youtube/youtube_follow/youtube_follow.py

@@ -676,31 +676,31 @@ class Follow:
 
     @classmethod
     def get_videos(cls, log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine):
-        try:
-            while True:
-                feeds = cls.get_feeds(log_type, crawler, browse_id, out_uid)
-                # Common.logger(log_type, crawler).info(f"feeds:{feeds}\n")
-                for i in range(len(feeds)):
-                    if 'richItemRenderer' not in feeds[i]:
-                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]}\n')
-                    elif 'content' not in feeds[i]['richItemRenderer']:
-                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]}\n')
-                    elif 'videoRenderer' not in feeds[i]['richItemRenderer']['content']:
-                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]}\n')
-                    elif 'videoId' not in feeds[i]["richItemRenderer"]["content"]['videoRenderer']:
-                        Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]["videoRenderer"]}\n')
+        # try:
+        while True:
+            feeds = cls.get_feeds(log_type, crawler, browse_id, out_uid)
+            # Common.logger(log_type, crawler).info(f"feeds:{feeds}\n")
+            for i in range(len(feeds)):
+                if 'richItemRenderer' not in feeds[i]:
+                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]}\n')
+                elif 'content' not in feeds[i]['richItemRenderer']:
+                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]}\n')
+                elif 'videoRenderer' not in feeds[i]['richItemRenderer']['content']:
+                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]}\n')
+                elif 'videoId' not in feeds[i]["richItemRenderer"]["content"]['videoRenderer']:
+                    Common.logger(log_type, crawler).warning(f'feeds:{feeds[i]["richItemRenderer"]["content"]["videoRenderer"]}\n')
+                else:
+                    video_id = feeds[i]["richItemRenderer"]["content"]['videoRenderer']['videoId']
+                    video_dict = cls.get_video_info(log_type, crawler, out_uid, video_id, machine)
+                    # published within the last 30 days
+                    publish_time = int(time.mktime(time.strptime(video_dict['publish_time'], "%Y-%m-%d")))
+                    if int(time.time()) - publish_time <= 3600*24*30:
+                        cls.download_publish(log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine)
                     else:
-                        video_id = feeds[i]["richItemRenderer"]["content"]['videoRenderer']['videoId']
-                        video_dict = cls.get_video_info(log_type, crawler, out_uid, video_id, machine)
-                        # published within the last 30 days
-                        publish_time = int(time.mktime(time.strptime(video_dict['publish_time'], "%Y-%m-%d")))
-                        if int(time.time()) - publish_time <= 3600*24*30:
-                            cls.download_publish(log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine)
-                        else:
-                            Common.logger(log_type, crawler).info('发布时间超过30天\n')
-                            return
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
+                        Common.logger(log_type, crawler).info('发布时间超过30天\n')
+                        return
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
 
     @classmethod
     def get_video_info(cls, log_type, crawler, out_uid, video_id, machine):
@@ -973,141 +973,141 @@ class Follow:
 
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, strategy, our_uid, env, oss_endpoint, machine):
-        try:
-            sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_dict['video_id']}" """
-            repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
-            if video_dict['video_title'] == '' or  video_dict['video_url'] == '':
-                Common.logger(log_type, crawler).info('无效视频\n')
-            elif video_dict['duration'] > 600 or video_dict['duration'] < 60:
-                Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足规则\n")
-            elif repeat_video is not None and len(repeat_video) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            else:
-                # download the video
-                Common.logger(log_type, crawler).info('开始下载视频...')
-                Common.download_method(log_type, crawler, 'video', video_dict['video_title'], video_dict['video_url'])
-                ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/")
-                video_width = ffmpeg_dict['width']
-                video_height = ffmpeg_dict['height']
-                duration = int(ffmpeg_dict['duration'])
-                video_size = ffmpeg_dict['size']
+        # try:
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_dict['video_id']}" """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        if video_dict['video_title'] == '' or  video_dict['video_url'] == '':
+            Common.logger(log_type, crawler).info('无效视频\n')
+        elif video_dict['duration'] > 600 or video_dict['duration'] < 60:
+            Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足规则\n")
+        elif repeat_video is not None and len(repeat_video) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        else:
+            # download the video
+            Common.logger(log_type, crawler).info('开始下载视频...')
+            Common.download_method(log_type, crawler, 'video', video_dict['video_title'], video_dict['video_url'])
+            ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+            video_width = int(ffmpeg_dict['width'])
+            video_height = int(ffmpeg_dict['height'])
+            duration = int(ffmpeg_dict['duration'])
+            video_size = int(ffmpeg_dict['size'])
 
-                Common.logger(log_type, crawler).info(f'video_width:{video_width}')
-                Common.logger(log_type, crawler).info(f'video_height:{video_height}')
-                Common.logger(log_type, crawler).info(f'duration:{duration}')
-                Common.logger(log_type, crawler).info(f'video_size:{video_size}\n')
+            Common.logger(log_type, crawler).info(f'video_width:{video_width}')
+            Common.logger(log_type, crawler).info(f'video_height:{video_height}')
+            Common.logger(log_type, crawler).info(f'duration:{duration}')
+            Common.logger(log_type, crawler).info(f'video_size:{video_size}\n')
 
-                video_dict['video_width'] = video_width
-                video_dict['video_height'] = video_height
-                video_dict['duration'] = duration
-                video_dict['comment_cnt'] = 0
-                video_dict['like_cnt'] = 0
-                video_dict['share_cnt'] = 0
-                video_dict['avatar_url'] = video_dict['cover_url']
-                video_dict['session'] = f'youtube{int(time.time())}'
-                rule='1,2'
-                if duration < 60 or duration > 600:
-                    # delete the video folder
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
-                    Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足抓取规则,删除成功\n")
-                    return
-                elif video_size == 0 or duration == 0 or video_size is None or duration is None:
-                    # delete the video folder
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
-                    Common.logger(log_type, crawler).info(f"视频下载出错,删除成功\n")
-                    return
-                else:
-                    # download the cover image
-                    Common.download_method(log_type, crawler, 'cover', video_dict['video_title'], video_dict['cover_url'])
-                    # save the video metadata locally
-                    Common.save_video_info(log_type, crawler, video_dict)
+            video_dict['video_width'] = video_width
+            video_dict['video_height'] = video_height
+            video_dict['duration'] = duration
+            video_dict['comment_cnt'] = 0
+            video_dict['like_cnt'] = 0
+            video_dict['share_cnt'] = 0
+            video_dict['avatar_url'] = video_dict['cover_url']
+            video_dict['session'] = f'youtube{int(time.time())}'
+            rule='1,2'
+            if duration < 60 or duration > 600:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
+                Common.logger(log_type, crawler).info(f"时长:{video_dict['duration']}不满足抓取规则,删除成功\n")
+                return
+            elif video_size == 0 or duration == 0 or video_size is None or duration is None:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
+                Common.logger(log_type, crawler).info(f"视频下载出错,删除成功\n")
+                return
+            else:
+                # download the cover image
+                Common.download_method(log_type, crawler, 'cover', video_dict['video_title'], video_dict['cover_url'])
+                # save the video metadata locally
+                Common.save_video_info(log_type, crawler, video_dict)
 
-                    # upload the video
-                    Common.logger(log_type, crawler).info(f"开始上传视频")
-                    if env == 'dev':
-                        our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
-                        our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                    else:
-                        our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
-                        our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                    Common.logger(log_type, crawler).info("视频上传完成")
+                # upload the video
+                Common.logger(log_type, crawler).info(f"开始上传视频")
+                if env == 'dev':
+                    our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_id = Publish.upload_and_publish(log_type, crawler, strategy, our_uid, env, oss_endpoint)
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
 
-                    # save the video info to Feishu
-                    Feishu.insert_columns(log_type, crawler, "GVxlYk", "ROWS", 1, 2)
-                    # video-ID worksheet: write the data into the first row
-                    upload_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                    values = [[upload_time,
-                               "定向榜",
-                               video_dict['video_id'],
-                               video_dict['video_title'],
-                               our_video_link,
-                               video_dict['play_cnt'],
-                               video_dict['duration'],
-                               f'{video_width}*{video_height}',
-                               video_dict['publish_time'],
-                               video_dict['user_name'],
-                               video_dict['cover_url'],
-                               video_dict['video_url']
-                               ]]
-                    time.sleep(1)
-                    Feishu.update_values(log_type, crawler, "GVxlYk", "F2:Z2", values)
-                    Common.logger(log_type, crawler).info('视频信息写入定向_已下载表成功\n')
+                # save the video info to Feishu
+                Feishu.insert_columns(log_type, crawler, "GVxlYk", "ROWS", 1, 2)
+                # video-ID worksheet: write the data into the first row
+                upload_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
+                values = [[upload_time,
+                           "定向榜",
+                           video_dict['video_id'],
+                           video_dict['video_title'],
+                           our_video_link,
+                           video_dict['play_cnt'],
+                           video_dict['duration'],
+                           f'{video_width}*{video_height}',
+                           video_dict['publish_time'],
+                           video_dict['user_name'],
+                           video_dict['cover_url'],
+                           video_dict['video_url']
+                           ]]
+                time.sleep(1)
+                Feishu.update_values(log_type, crawler, "GVxlYk", "F2:Z2", values)
+                Common.logger(log_type, crawler).info('视频信息写入定向_已下载表成功\n')
 
-                    # save the video info to the database
-                    sql = f""" insert into crawler_video(video_id, 
-                    user_id, 
-                    out_user_id, 
-                    platform, 
-                    strategy, 
-                    out_video_id, 
-                    video_title, 
-                    cover_url, 
-                    video_url, 
-                    duration, 
-                    publish_time, 
-                    play_cnt, 
-                    crawler_rule, 
-                    width, 
-                    height) 
-                    values({our_video_id}, 
-                    "{our_uid}", 
-                    "{video_dict['out_uid']}", 
-                    "{cls.platform}", 
-                    "定向爬虫策略", 
-                    "{video_dict['video_id']}", 
-                    "{video_dict['video_title']}", 
-                    "{video_dict['cover_url']}",
-                    "{video_dict['video_url']}",
-                    {int(duration)},
-                    "{video_dict['publish_time']}",
-                    {int(video_dict['play_cnt'])},
-                    "{rule}",
-                    {int(video_width)},
-                    {int(video_height)}) """
-                    MysqlHelper.update_values(log_type, crawler, sql, env, machine)
-                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        except Exception as e:
-            Common.logger(log_type, crawler).info(f"download_publish异常:{e}\n")
+                # save the video info to the database
+                sql = f""" insert into crawler_video(video_id, 
+                user_id, 
+                out_user_id, 
+                platform, 
+                strategy, 
+                out_video_id, 
+                video_title, 
+                cover_url, 
+                video_url, 
+                duration, 
+                publish_time, 
+                play_cnt, 
+                crawler_rule, 
+                width, 
+                height) 
+                values({our_video_id}, 
+                "{our_uid}", 
+                "{video_dict['out_uid']}", 
+                "{cls.platform}", 
+                "定向爬虫策略", 
+                "{video_dict['video_id']}", 
+                "{video_dict['video_title']}", 
+                "{video_dict['cover_url']}",
+                "{video_dict['video_url']}",
+                {int(duration)},
+                "{video_dict['publish_time']}",
+                {int(video_dict['play_cnt'])},
+                "{rule}",
+                {int(video_width)},
+                {int(video_height)}) """
+                MysqlHelper.update_values(log_type, crawler, sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).info(f"download_publish异常:{e}\n")
 
     @classmethod
     def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        try:
-            user_list = cls.get_user_from_feishu(log_type, crawler, 'c467d7', env, machine)
-            if len(user_list) == 0:
-                Common.logger(log_type, crawler).warning('用户列表为空\n')
-            else:
-                for user_dict in user_list:
-                    out_uid = user_dict['out_user_id']
-                    user_name = user_dict['out_user_name']
-                    browse_id = user_dict['out_browse_id']
-                    our_uid = user_dict['our_user_id']
-                    Common.logger(log_type, crawler).info(f'获取 {user_name} 主页视频\n')
-                    cls.get_videos(log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine)
-                    Common.logger(log_type, crawler).info('休眠 10 秒')
-                    time.sleep(10)
-                    cls.continuation = ''
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_follow_videos异常:{e}\n")
+        # try:
+        user_list = cls.get_user_from_feishu(log_type, crawler, 'c467d7', env, machine)
+        if len(user_list) == 0:
+            Common.logger(log_type, crawler).warning('用户列表为空\n')
+        else:
+            for user_dict in user_list:
+                out_uid = user_dict['out_user_id']
+                user_name = user_dict['out_user_name']
+                browse_id = user_dict['out_browse_id']
+                our_uid = user_dict['our_user_id']
+                Common.logger(log_type, crawler).info(f'获取 {user_name} 主页视频\n')
+                cls.get_videos(log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine)
+                Common.logger(log_type, crawler).info('休眠 10 秒')
+                time.sleep(10)
+                cls.continuation = ''
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"get_follow_videos异常:{e}\n")
 
 
 if __name__ == "__main__":
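Note: the bulk of this file's change comments out the try/except wrappers (re-indenting their bodies) so failures surface with full tracebacks during debugging, switches Common.ffmpeg to the explicit video.mp4 path, and casts the ffmpeg fields to int. If the intent is visibility rather than dropping the handlers entirely, a log-and-re-raise sketch keeps both:

    # sketch: keep the logged message but let the traceback propagate
    try:
        cls.get_videos(log_type, crawler, strategy, oss_endpoint, env, browse_id, out_uid, our_uid, machine)
    except Exception as e:
        Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
        raise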