wangkun authored 1 year ago
parent
commit
fee4528e47

+ 7 - 7
common/publish.py

@@ -188,13 +188,13 @@ class Publish:
             uids_prod_xiaoniangao_play = [50322222, 50322223, 50322224, 50322225]
             uids_prod_xiaoniangao_play = [50322222, 50322223, 50322224, 50322225]
             return random.choice(uids_prod_xiaoniangao_play)
             return random.choice(uids_prod_xiaoniangao_play)
 
 
-        elif crawler == 'gongzhonghao' and env == 'prod' and strategy == '定向爬虫策略':
-            uids_prod_gongzhonghao_follow = [26117675, 26117676, 26117677, 26117678, 26117679, 26117680]
-            return random.choice(uids_prod_gongzhonghao_follow)
-
-        elif crawler == 'xigua' and env == 'prod' and strategy == '推荐榜爬虫策略':
-            uids_prod_gongzhonghao_follow = [50322238]
-            return random.choice(uids_prod_gongzhonghao_follow)
+        # elif crawler == 'gongzhonghao' and env == 'prod' and strategy == '定向爬虫策略':
+        #     uids_prod_gongzhonghao_follow = [26117675, 26117676, 26117677, 26117678, 26117679, 26117680]
+        #     return random.choice(uids_prod_gongzhonghao_follow)
+        #
+        # elif crawler == 'xigua' and env == 'prod' and strategy == '推荐榜爬虫策略':
+        #     uids_prod_gongzhonghao_follow = [50322238]
+        #     return random.choice(uids_prod_gongzhonghao_follow)
 
 
         # elif crawler == 'benshanzhufu' and env == 'prod' and strategy == '推荐榜爬虫策略':
         # elif crawler == 'benshanzhufu' and env == 'prod' and strategy == '推荐榜爬虫策略':
         #     uids_prod_benshanzhufu_recommend = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]
         #     uids_prod_benshanzhufu_recommend = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]

+ 1 - 1
kuaishou/kuaishou_author/kuaishou_author.py → kuaishou/kuaishou_author/kuaishou_author_scheduling.py

@@ -171,7 +171,7 @@ class KuaishouauthorScheduling:
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
 
 
                     if int((int(time.time()) - int(publish_time_stamp)) / (3600*24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                     if int((int(time.time()) - int(publish_time_stamp)) / (3600*24)) > int(rule_dict.get("period", {}).get("max", 1000)):
-                        Common.logger(log_type, crawler).info(f'发布时间超过{int((int(time.time()) - int(publish_time_stamp)) / (3600*24))}天\n')
+                        Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                         return
                         return
                     if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                     if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                         Common.logger(log_type, crawler).info('无效视频\n')
                         Common.logger(log_type, crawler).info('无效视频\n')

+ 1 - 1
kuaishou/kuaishou_main/run_kuaishou_author_scheduling.py

@@ -8,7 +8,7 @@ sys.path.append(os.getcwd())
 from common.common import Common
 from common.common import Common
 from common.public import task_fun
 from common.public import task_fun
 from common.scheduling_db import MysqlHelper
 from common.scheduling_db import MysqlHelper
-from kuaishou.kuaishou_author.kuaishou_author import KuaishouauthorScheduling
+from kuaishou.kuaishou_author.kuaishou_author_scheduling import KuaishouauthorScheduling
 
 
 
 
 def main(log_type, crawler, task, env):
 def main(log_type, crawler, task, env):

+ 14 - 14
main/process.sh

@@ -159,20 +159,20 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 抖音推荐爬虫策略 进程状态正常" >> ${log_path}
   echo "$(date "+%Y-%m-%d %H:%M:%S") 抖音推荐爬虫策略 进程状态正常" >> ${log_path}
 fi
 fi
 
 
-# 西瓜定向爬虫策略
-echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜定向爬虫策略 进程状态" >> ${log_path}
-ps -ef | grep "run_xigua_follow.py" | grep -v "grep"
-if [ "$?" -eq 1 ];then
-  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
-  if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --env="dev" xigua/logs/nohup-follow.log
-  else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/logs/nohup-follow.log
-  fi
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜定向爬虫策略 进程状态正常" >> ${log_path}
-fi
+## 西瓜定向爬虫策略
+#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜定向爬虫策略 进程状态" >> ${log_path}
+#ps -ef | grep "run_xigua_follow.py" | grep -v "grep"
+#if [ "$?" -eq 1 ];then
+#  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+#  if [ ${env} = "dev" ];then
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --env="dev" xigua/logs/nohup-follow.log
+#  else
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/logs/nohup-follow.log
+#  fi
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+#else
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜定向爬虫策略 进程状态正常" >> ${log_path}
+#fi
 
 
 ## 西瓜推荐榜爬虫策略
 ## 西瓜推荐榜爬虫策略
 #echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜推荐榜爬虫策略 进程状态" >> ${log_path}
 #echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜推荐榜爬虫策略 进程状态" >> ${log_path}

+ 3 - 0
xigua/xigua_author/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26

+ 760 - 0
xigua/xigua_author/xigua_author_scheduling.py

@@ -0,0 +1,760 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26
+import base64
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+sys.path.append(os.getcwd())
+from common.userAgent import get_random_user_agent
+from common.scheduling_db import MysqlHelper
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql, download_rule
+
+
+class XiguaauthorScheduling:
+    platform = "西瓜视频"
+
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
+        video_url_dict = {}
+        # video_url
+        if 'videoResource' not in video_info:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        elif 'dash_120fps' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'backup_url_1']
+                audio_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1][
+                        'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vwidth']
+                video_height = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'dash' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash'] and 'video_4' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'normal' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['normal'] and 'video_4' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_3' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_2' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_1' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['normal'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        else:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        return video_url_dict
+
    @classmethod
    def get_comment_cnt(cls, item_id):
        """Fetch the total comment count for an ixigua video.

        Calls the tab_comments endpoint through the tunnel proxy and reads
        ``total_number`` from the JSON response. Returns 0 on any non-200
        status or unexpected payload.

        NOTE(review): the msToken / X-Bogus / cookie / csrf values below are
        hard-coded captures from a browser session and will presumably expire
        — verify they are still accepted before relying on this.
        """
        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
        params = {
            "tab_index": "0",
            "count": "10",
            "offset": "10",
            "group_id": str(item_id),
            "item_id": str(item_id),
            "aid": "1768",
            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
            # Per-request fake signature generated locally.
            "_signature": cls.random_signature(),
        }
        headers = {
            'authority': 'www.ixigua.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'cache-control': 'no-cache',
            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
            'pragma': 'no-cache',
            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
        }
        # verify=False below triggers InsecureRequestWarning; silence it.
        urllib3.disable_warnings()
        s = requests.session()
        # Retry the request up to 3 times on connection failures.
        s.mount('http://', HTTPAdapter(max_retries=3))
        s.mount('https://', HTTPAdapter(max_retries=3))
        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(),
                         timeout=5)
        response.close()
        # NOTE(review): the `== {}` clause is unreachable — an empty dict
        # already fails the 'total_number' membership test before it.
        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
            return 0
        return response.json().get("total_number", 0)
+
+    # 获取视频详情
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3 重试3次
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False,
+                         proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S",
+                                                  time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
+        signature = cls.random_signature()
+        offset = 0
+        while True:
+            url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
+            params = {
+                'to_user_id': str(user_dict["link"].replace("https://www.ixigua.com/home/", "")),
+                'offset': str(offset),
+                'limit': '30',
+                'maxBehotTime': '0',
+                'order': 'new',
+                'isHome': '0',
+                # 'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
+                # 'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
+                '_signature': signature,
+            }
+            headers = {
+                'referer': f'https://www.ixigua.com/home/{user_dict["link"].replace("https://www.ixigua.com/home/", "")}/video/?preActiveKey=hotsoon&list_entrance=userdetail',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+            response.close()
+            offset += 30
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                return
+            elif 'data' not in response.text:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                return
+            elif not response.json()["data"]['videoList']:
+                Common.logger(log_type, crawler).warning(f"没有更多数据啦~:{response.json()}\n")
+                return
+            feeds = response.json()['data']['videoList']
+            for i in range(len(feeds)):
+                try:
+                    item_id = feeds[i].get("item_id", "")
+                    if item_id == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        continue
+
+                    video_dict = cls.get_video_info(log_type, crawler, item_id)
+                    if video_dict is None:
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        continue
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
+                        Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                        return
+                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) if str(word) in video_dict["video_title"] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             user_dict=user_dict,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             env=env)
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
    # Download / upload pipeline for one video
    @classmethod
    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env):
        """Download one video's media, publish it, and record the result.

        Steps: download video + audio streams, mux them, sanity-check the
        output file, download the cover, upload via Publish, then write a
        row to the crawler_video table and append a row to a Feishu sheet.

        :param log_type: logger channel name
        :param crawler: crawler name, also the on-disk working directory
        :param user_dict: target user row (needs "uid")
        :param video_dict: metadata produced by get_video_info()
        :param rule_dict: crawl rule dict, stored verbatim in the DB row
        :param env: "dev" or "prod" (selects OSS endpoint and admin host)
        """
        # Download the video stream
        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
        # Download the audio stream (xigua serves audio and video separately)
        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'], url=video_dict['audio_url'])
        # Mux audio + video into a single file
        # NOTE(review): video_compose is given a directory named by the raw
        # title, while the checks below use the md5 of the title — confirm
        # Common.download_method / video_compose agree on the directory name.
        Common.video_compose(log_type=log_type, crawler=crawler, video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Empty output file: discard the working folder and bail out
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # video.mp4 was never produced: clean up the folder and bail out
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save metadata alongside the media files
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)

        # Upload the video; dev uses the external OSS endpoint + test admin host
        Common.logger(log_type, crawler).info("开始上传视频...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)

            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"

        if our_video_id is None:
            # Upload failed: remove the local working folder, ignore if gone
            try:
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return

        # Persist the video row to MySQL.
        # NOTE(review): strategy is recorded as "定向爬虫策略" here but the
        # upload above used "推荐抓取策略" — confirm which one is intended.
        # Values are interpolated directly into the SQL string; titles with
        # quotes would break the statement — consider parameterized queries.
        insert_sql = f""" insert into crawler_video(video_id,
                        user_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        {user_dict["uid"]},
                        "{video_dict['user_id']}",
                        "{cls.platform}",
                        "定向爬虫策略",
                        "{video_dict['video_id']}",
                        "{video_dict['video_title']}",
                        "{video_dict['cover_url']}",
                        "{video_dict['video_url']}",
                        {int(video_dict['duration'])},
                        "{video_dict['publish_time_str']}",
                        {int(video_dict['play_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_dict['video_width'])},
                        {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')

        # Append a row to the Feishu sheet (insert a blank row, then fill it)
        Feishu.insert_columns(log_type, crawler, "e075e9", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "定向榜",
                   video_dict['video_title'],
                   str(video_dict['video_id']),
                   our_video_link,
                   video_dict['gid'],
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['video_url'],
                   video_dict['audio_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "e075e9", "F2:Z2", values)
        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+    @classmethod
+    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
+        for user_dict in user_list:
+            try:
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  user_dict=user_dict,
+                                  rule_dict=rule_dict,
+                                  env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取{user_dict['nick_name']}视频时异常:{e}\n")
+
+
if __name__ == '__main__':
    # Ad-hoc smoke test: dedup count for a known video id against the dev DB
    print(XiguaauthorScheduling.repeat_video("follow", "xigua", "v0201ag10000ce3jcjbc77u8jsplpgrg", "dev"))

+ 43 - 0
xigua/xigua_main/run_xigua_author_scheduling.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+from common.scheduling_db import MysqlHelper
+from xigua.xigua_author.xigua_author_scheduling import XiguaauthorScheduling
+
+
def main(log_type, crawler, task, env):
    """Run one round of the xigua author (user home page) crawl.

    :param log_type: logger channel name
    :param crawler: crawler name
    :param task: JSON task payload, parsed by task_fun() into task/rule dicts
    :param env: "dev" or "prod"
    """
    # Parse the task payload once (the original called task_fun twice)
    parsed = task_fun(task)
    task_dict = parsed['task_dict']
    rule_dict = parsed['rule_dict']
    task_id = task_dict['task_id']
    # Load the users bound to this task
    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
    XiguaauthorScheduling.get_author_videos(log_type=log_type,
                                            crawler=crawler,
                                            rule_dict=rule_dict,
                                            user_list=user_list,
                                            env=env)
    Common.del_logs(log_type, crawler)
    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--env', default='prod')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)

+ 45 - 0
xigua/xigua_main/run_xigua_search_scheduling.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+from common.scheduling_db import MysqlHelper
+from xigua.xigua_search.xigua_search_scheduling import XiguasearchScheduling
+
+
def main(log_type, crawler, task, env):
    """Run one round of the xigua search crawl (selenium-driven).

    :param log_type: logger channel name
    :param crawler: crawler name
    :param task: JSON task payload, parsed by task_fun() into task/rule dicts
    :param env: "dev" or "prod"
    """
    # Parse the task payload once (the original called task_fun twice)
    parsed = task_fun(task)
    task_dict = parsed['task_dict']
    rule_dict = parsed['rule_dict']
    task_id = task_dict['task_id']
    # Load the users bound to this task
    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
    XiguasearchScheduling.get_search_videos(log_type=log_type,
                                            crawler=crawler,
                                            rule_dict=rule_dict,
                                            user_list=user_list,
                                            env=env)
    # Kill any Chrome / chromedriver processes left over from the selenium run
    os.system("ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9")
    os.system("ps aux | grep chromedriver | grep -v grep | awk '{print $2}' | xargs kill -9")
    Common.del_logs(log_type, crawler)
    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--env', default='prod')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)

+ 31 - 28
xigua/xigua_recommend/xigua_recommend_scheduling.py

@@ -669,34 +669,37 @@ class XiguarecommendScheduling:
             else:
             else:
                 feeds = response.json()['data']['channelFeed']['Data']
                 feeds = response.json()['data']['channelFeed']['Data']
                 for i in range(len(feeds)):
                 for i in range(len(feeds)):
-                    item_id = feeds[i].get("data", {}).get("item_id", "")
-                    if item_id == "":
-                        Common.logger(log_type, crawler).info("无效视频\n")
-                        continue
-                    video_dict = cls.get_video_info(log_type, crawler, item_id)
-                    if video_dict is None:
-                        Common.logger(log_type, crawler).info("无效视频\n")
-                        continue
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
-                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                    elif any(str(word) if str(word) in video_dict["video_title"] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info('已中过滤词\n')
-                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             our_uid=our_uid,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             env=env)
+                    try:
+                        item_id = feeds[i].get("data", {}).get("item_id", "")
+                        if item_id == "":
+                            Common.logger(log_type, crawler).info("无效视频\n")
+                            continue
+                        video_dict = cls.get_video_info(log_type, crawler, item_id)
+                        if video_dict is None:
+                            Common.logger(log_type, crawler).info("无效视频\n")
+                            continue
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        elif any(str(word) if str(word) in video_dict["video_title"] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info('已中过滤词\n')
+                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                            Common.logger(log_type, crawler).info('视频已下载\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 our_uid=our_uid,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频时异常:{e}\n")
 
 
     @classmethod
     @classmethod
     def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
     def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):

+ 13 - 0
xigua/xigua_search/xigua_search_new.py

@@ -661,6 +661,7 @@ class XiguaSearchNew:
         chrome_options = webdriver.ChromeOptions()
         chrome_options = webdriver.ChromeOptions()
         chrome_options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
         chrome_options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
         chrome_options.add_argument("--headless")
         chrome_options.add_argument("--headless")
+        chrome_options.add_argument("--window-size=1920,1080")
         chrome_options.add_argument("--no-sandbox")
         chrome_options.add_argument("--no-sandbox")
         if env == "dev":
         if env == "dev":
             chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v112/chromedriver"
             chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v112/chromedriver"
@@ -672,6 +673,14 @@ class XiguaSearchNew:
         Common.logger(log_type, crawler).info(f"打开搜索页:{search_word}")
         Common.logger(log_type, crawler).info(f"打开搜索页:{search_word}")
         driver.get(f"https://www.ixigua.com/search/{search_word}/")
         driver.get(f"https://www.ixigua.com/search/{search_word}/")
         time.sleep(1)
         time.sleep(1)
+        if len(driver.find_elements(By.XPATH, '//*[@class="xg-notification-close"]')) != 0:
+            driver.find_element(By.XPATH, '//*[@class="xg-notification-close"]').click()
+        Common.logger(log_type, crawler).info("点击筛选")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
+        time.sleep(0.5)
+        Common.logger(log_type, crawler).info("点击最新排序")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2-category__wrapper"]/*[2]/*[1]').click()
+        time.sleep(1)
 
 
         index = 0
         index = 0
         while True:
         while True:
@@ -713,6 +722,10 @@ class XiguaSearchNew:
                         for k, v in video_dict.items():
                         for k, v in video_dict.items():
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
                         rule_dict = cls.get_rule_dict(log_type, crawler)
                         rule_dict = cls.get_rule_dict(log_type, crawler)
+                        if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict["publish_time"]):
+                            Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict["publish_time"])}天\n')
+                            driver.quit()
+                            return
                         if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                         if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                             Common.logger(log_type, crawler).info("不满足抓取规则\n")
                             Common.logger(log_type, crawler).info("不满足抓取规则\n")
                         elif any(str(word) if str(word) in video_dict["video_title"] else False for word in cls.filter_words(log_type, crawler, env)) is True:
                         elif any(str(word) if str(word) in video_dict["video_title"] else False for word in cls.filter_words(log_type, crawler, env)) is True:

+ 787 - 0
xigua/xigua_search/xigua_search_scheduling.py

@@ -0,0 +1,787 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26
+import base64
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.scheduling_db import MysqlHelper
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql, download_rule
+from common.userAgent import get_random_user_agent
+
+
class XiguasearchScheduling:
    # Number of videos downloaded during the current crawl run.
    download_cnt = 0
    # Platform label ("Xigua Video") used when persisting / reporting results.
    platform = "西瓜视频"
+
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
+        video_url_dict = {}
+        # video_url
+        if 'videoResource' not in video_info:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        elif 'dash_120fps' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'backup_url_1']
+                audio_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1][
+                        'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vwidth']
+                video_height = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'dash' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash'] and 'video_4' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'normal' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['normal'] and 'video_4' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_3' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_2' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_1' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['normal'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        else:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        return video_url_dict
+
    @classmethod
    def get_comment_cnt(cls, item_id):
        """Return the comment count ('total_number') for the given item id.

        Falls back to 0 on a non-200 response or when the payload lacks
        'total_number'.
        """
        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
        # NOTE(review): msToken / X-Bogus / cookie values below are hard-coded
        # captures from a browser session — presumably they expire; confirm
        # they are refreshed somewhere before relying on this endpoint.
        params = {
            "tab_index": "0",
            "count": "10",
            "offset": "10",
            "group_id": str(item_id),
            "item_id": str(item_id),
            "aid": "1768",
            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
            "_signature": cls.random_signature(),
        }
        headers = {
            'authority': 'www.ixigua.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'cache-control': 'no-cache',
            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
            'pragma': 'no-cache',
            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
        }
        urllib3.disable_warnings()
        s = requests.session()
        # max_retries=3: retry failed connections up to 3 times
        s.mount('http://', HTTPAdapter(max_retries=3))
        s.mount('https://', HTTPAdapter(max_retries=3))
        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
        response.close()
        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
            return 0
        return response.json().get("total_number", 0)
+
+    # 获取视频详情
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3 重试3次
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
+        """Search ixigua.com for user_dict['nick_name'] and crawl matching videos.
+
+        Drives a headless Chrome instance: opens the search page, switches the
+        result ordering to "newest first", then walks the lazily-loaded result
+        cards batch by batch.  Each candidate video is resolved through
+        cls.get_video_info() and, if it passes the crawl rules, handed to
+        cls.download_publish().
+
+        Exits (and quits the driver) when:
+          * the per-keyword download quota rule_dict["videos_cnt"]["min"] is hit,
+          * a video older than rule_dict["period"]["max"] days is seen (valid
+            because results are sorted newest-first), or
+          * scrolling yields no new result cards.
+
+        :param log_type: logger channel name
+        :param crawler: crawler/platform name (also used in file-system paths)
+        :param user_dict: must contain "nick_name" (the search keyword) and "uid"
+        :param rule_dict: crawl-rule dict (videos_cnt / period / download rules)
+        :param env: "dev" or "prod" — selects the chromedriver binary path
+        :return: None
+        """
+        # Enable Chrome performance logging in the driver capabilities.
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # Run the browser headless (no visible window).
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--headless")
+        chrome_options.add_argument("--window-size=1920,1080")
+        chrome_options.add_argument("--no-sandbox")
+        # chromedriver location differs between the dev laptop and prod hosts.
+        if env == "dev":
+            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v112/chromedriver"
+        else:
+            chromedriver = "/usr/bin/chromedriver"
+        # Initialize the WebDriver with a 10s implicit wait for element lookups.
+        driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        Common.logger(log_type, crawler).info(f"打开搜索页:{user_dict['nick_name']}")
+        driver.get(f"https://www.ixigua.com/search/{user_dict['nick_name']}/")
+        time.sleep(1)
+        # driver.get_screenshot_as_file(f"./{crawler}/logs/打开搜索页.jpg")
+        # Dismiss the notification popup if it is present.
+        if len(driver.find_elements(By.XPATH, '//*[@class="xg-notification-close"]')) != 0:
+            driver.find_element(By.XPATH, '//*[@class="xg-notification-close"]').click()
+            time.sleep(1)
+        # Open the filter panel and switch ordering to "newest first" —
+        # required for the publish-time early-exit below to be correct.
+        Common.logger(log_type, crawler).info("点击筛选")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
+        time.sleep(1)
+        Common.logger(log_type, crawler).info("点击最新排序")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2-category__wrapper"]/*[2]/*[1]').click()
+        time.sleep(1)
+        # driver.get_screenshot_as_file(f"./{crawler}/logs/已点击最新排序.jpg")
+
+        # index: how many cards have already been processed (the page appends
+        # new cards as we scroll, so each pass handles only the fresh slice).
+        # num: running counter used purely for log messages.
+        index = 0
+        num = 0
+        while True:
+            video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
+            video_element_temp = video_elements[index:]
+            # No new cards appeared after the last scroll — end of results.
+            if len(video_element_temp) == 0:
+                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                driver.quit()
+                return
+            for i, video_element in enumerate(video_element_temp):
+                try:
+                    # Stop once the per-keyword download quota is reached.
+                    if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 30)):
+                        Common.logger(log_type, crawler).info(f"搜索词: {user_dict['nick_name']},已下载视频数: {cls.download_cnt}\n")
+                        driver.quit()
+                        return
+                    if video_element is None:
+                        Common.logger(log_type, crawler).info('到底啦~\n')
+                        driver.quit()
+                        return
+                    num += 1
+                    # Scroll the card to the middle of the viewport so the page
+                    # lazy-loads it (and the next batch of results).
+                    Common.logger(log_type, crawler).info(f'拖动"视频"列表第{num}个至屏幕中间')
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
+                    time.sleep(1)
+                    # Extract the item id from the card's cover link
+                    # (index+i addresses this card in the full cover list).
+                    item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i].get_attribute('href')
+                    item_id = item_id.split("com/")[-1].split("?&")[0]
+                    video_dict = cls.get_video_info(log_type, crawler, item_id)
+                    if video_dict is None:
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        continue
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    # Results are sorted newest-first, so the first too-old
+                    # video means everything after it is too old as well.
+                    if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
+                        Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                        driver.quit()
+                        return
+                    # Rule check -> filter-word check -> dedup check -> download.
+                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) if str(word) in video_dict["video_title"] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             user_dict=user_dict,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             env=env)
+                except Exception as e:
+                    # One bad card must not abort the whole keyword crawl.
+                    Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
+
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠3秒\n')
+            time.sleep(3)
+            index = index + len(video_element_temp)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        """Return how many crawler_video rows already exist for this out_video_id.
+
+        A non-zero result means the video was downloaded before and should be
+        skipped by the caller.
+
+        NOTE(review): video_id is interpolated straight into the SQL text and
+        originates from scraped page data — consider a parameterized query if
+        MysqlHelper supports one.
+        """
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, action="")
+        return len(repeat_video)
+
+    # Download / upload pipeline for a single video.
+    @classmethod
+    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env):
+        """Download one video, publish it to the platform, and record it.
+
+        Steps: download the video and audio streams, merge them with ffmpeg,
+        sanity-check the resulting file, download the cover, upload to OSS /
+        publish, insert a row into crawler_video, and append a row to the
+        Feishu tracking sheet.  On a failed merge or upload the local working
+        directory is deleted and the method returns early.
+
+        :param user_dict: contains "uid" (publish account) and "nick_name"
+        :param video_dict: metadata built by get_video_info()
+        :param rule_dict: crawl rules, stored as JSON in the DB row
+        :param env: "dev" (test admin) or "prod"
+        :return: None
+        """
+
+        # Download the video stream.
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video',
+                               title=video_dict['video_title'], url=video_dict['video_url'])
+        # Download the audio stream.
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio',
+                               title=video_dict['video_title'], url=video_dict['audio_url'])
+        # Merge audio and video into a single file.
+        Common.video_compose(log_type=log_type, crawler=crawler,
+                             video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+        # Working directory name is the md5 of the title — presumably matching
+        # how Common.download_method names it; TODO confirm.
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Zero-byte merge result: delete the working directory and bail.
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # Merge produced no file at all: delete the working directory and bail.
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # Download the cover image.
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                               title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Persist the video metadata to info.txt for the uploader.
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload and publish; dev and prod differ only in the OSS endpoint and
+        # the admin-console host used for the result link.
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="搜索抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="搜索抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
+        if our_video_id is None:
+            try:
+                # Upload failed: delete the working directory.
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Record the published video in the crawler_video table.
+        # NOTE(review): the strategy stored here is "搜索爬虫策略" while the
+        # upload above uses "搜索抓取策略" — confirm which label is intended.
+        insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {user_dict["uid"]},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "搜索爬虫策略",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        # Count towards the per-keyword quota checked in get_videoList().
+        cls.download_cnt += 1
+        Common.logger(log_type, crawler).info("视频信息写入数据库完成")
+
+        # Append a tracking row to the Feishu sheet (tab "BUNvGC").
+        Feishu.insert_columns(log_type, crawler, "BUNvGC", "ROWS", 1, 2)
+        values = [[user_dict["nick_name"],
+            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+            "关键词搜索",
+            video_dict['video_title'],
+            str(video_dict['video_id']),
+            our_video_link,
+            video_dict['gid'],
+            video_dict['play_cnt'],
+            video_dict['comment_cnt'],
+            video_dict['like_cnt'],
+            video_dict['share_cnt'],
+            video_dict['duration'],
+            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+            video_dict['publish_time_str'],
+            video_dict['user_name'],
+            video_dict['user_id'],
+            video_dict['avatar_url'],
+            video_dict['cover_url'],
+            video_dict['video_url'],
+            video_dict['audio_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "BUNvGC", "E2:Z2", values)
+        Common.logger(log_type, crawler).info('视频信息写入飞书完成\n')
+
+    @classmethod
+    def get_search_videos(cls, log_type, crawler, user_list, rule_dict, env):
+        for user_dict in user_list:
+            try:
+                cls.download_cnt = 0
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 视频\n")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  user_dict=user_dict,
+                                  rule_dict=rule_dict,
+                                  env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取{user_dict['nick_name']}视频时异常:{e}\n")
+
+
+if __name__ == '__main__':
+    # No standalone entry point yet: this module is driven by scheduler code
+    # that calls XiguasearchScheduling-style class methods directly.
+
+    pass