
add xigua_search

wangkun, 1 year ago
commit 565226dbe1

.gitignore  +2 -0

@@ -3,12 +3,14 @@
 __pycache__/
 *.py[cod]
 *$py.class
+*.DS_Store
 
 # C extensions
 *.so
 
 # Distribution / packaging
 .Python
+*/__pycache__/
 env/
 build/
 develop-eggs/

README.MD  +16 -14

@@ -71,11 +71,13 @@ ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
 本机
 西瓜定向: sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
 西瓜推荐: sh ./main/scheduling_main.sh ./xigua/xigua_main/run_xigua_recommend.py --log_type="recommend" --crawler="xigua" --env="dev" xigua/logs/nohup-recommend.log
+西瓜搜索: sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_search_new.py --log_type="search" --crawler="xigua" --env="dev" xigua/logs/search-shell.log
 杀进程命令:
 ps aux | grep run_xigua
 ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xigua_follow | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xigua_recommend | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
 #### 快手
@@ -95,7 +97,6 @@ ps aux | grep run_kuaishou
 ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
-
 #### 小年糕
 ```commandline
 阿里云 102 服务器
@@ -122,7 +123,6 @@ ps aux | grep run_xiaoniangao_hour | grep -v grep | awk '{print $2}' | xargs kil
 ps aux | grep run_xiaoniangao_play | grep -v grep | awk '{print $2}' | xargs kill -9 
 ```
 
-
 #### 公众号
 ```commandline
 阿里云 102 服务器
@@ -140,7 +140,6 @@ ps aux | grep run_gongzhonghao
 ps aux | grep run_gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9 
 ```
 
-
 #### 微信指数
 ```commandline
 获取站外标题, crontab定时脚本, 每天 12:00:00 点运行一次
@@ -165,7 +164,6 @@ ps aux | grep 微信 | grep -v grep | awk '{print $2}' | xargs kill -9
 
 ```
 
-
 #### 抖音
 ```commandline
 阿里云 102 服务器
@@ -211,24 +209,15 @@ MacAir 设备, crontab定时任务
 sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_offline.sh "dev"
 cd /Users/piaoquan/Desktop/piaoquan_crawler/ && nohup python3 -u weixinzhishu/weixinzhishu_key/search_key_mac.py >> weixinzhishu/logs/nohup-search-key.log 2>&1 &
 检测进程
-ps aux | grep run_ganggangdouchuan
-ps aux | grep run_jixiangxingfu
-ps aux | grep run_zhongmiaoyinxin
-ps aux | grep run_zhiqingtiantiankan
-ps aux | grep search_key_mac
 ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
-ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
-ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
-ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
-ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
+#### 视频号
 ```commandline
-视频号搜索
 正式环境
 00 00 * * * /bin/sh /Users/piaoquan/Desktop/piaoquan_crawler/shipinhao/shipinhao_main/run_shipinhao.sh shipinhao/shipinhao_main/run_shipinhao_search.py --log_type="search" --crawler="shipinhao" --env="prod"
 线下调试
@@ -236,4 +225,17 @@ sh shipinhao/shipinhao_main/run_shipinhao.sh shipinhao/shipinhao_main/run_shipin
 检测进程
 ps aux | grep shipinhao_search
 ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+#### 爬虫进程监控: main/process.sh
+```commandline
+102 服务器: 
+* * * * * /usr/bin/sh /data5/piaoquan_crawler/main/process.sh "prod"  >>/data5/piaoquan_crawler/main/main_logs/run-process.log 2>&1
+线下调试: 
+sh main/process.sh "dev" >> main/main_logs/run-process.log 2>&1
+进程监控
+ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
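
For reference, the new monitoring entry runs main/process.sh once a minute (`* * * * *`) and appends both stdout and stderr to run-process.log (`2>&1`). A quick, generic way to confirm the job is installed and writing its log (run from the piaoquan_crawler directory; this check is not part of the commit):

```commandline
# list the current user's cron jobs and confirm the process.sh entry is present
crontab -l | grep process.sh
# follow the monitoring log to verify it updates once a minute
tail -f main/main_logs/run-process.log
```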

common/create_user.py  +1 -1

@@ -4,7 +4,7 @@
 import uuid, requests
 
 
-class Demo:
+class CreateUser:
     @classmethod
     def get_default_user(cls):
         url = "https://api-internal.piaoquantv.com/user-center/info/getDefaultUserInfo"

main/process.sh  +32 -31

@@ -17,6 +17,7 @@ else
   log_path=${piaoquan_crawler_dir}main/main_logs/process-$(date +%Y-%m-%d).log
 fi
 
+time=$(date +%H:%M:%S)
 echo "$(date "+%Y-%m-%d %H:%M:%S") 开始监测爬虫进程状态" >> ${log_path}
 
 echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量..." >> ${log_path}
@@ -25,7 +26,6 @@ echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量完成!" >> ${log_path}
 
 # 公众号爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略 1-100个账号 进程状态" >> ${log_path}
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略1-40个账号 进程状态" >> ${log_path}
 ps -ef | grep "run_gongzhonghao_follow.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
@@ -52,20 +52,6 @@ if [ "$?" -eq 1 ];then
 else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 公众号爬虫策略101-145个账号 进程状态正常" >> ${log_path}
 fi
-#
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略81-121个账号 进程状态" >> ${log_path}
-#ps -ef | grep "run_gongzhonghao_follow_3.py" | grep -v "grep"
-#if [ "$?" -eq 1 ];then
-#  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
-#  if [ ${env} = "dev" ];then
-#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_3.py --log_type="follow-3" --crawler="gongzhonghao" --env="dev" gongzhonghao/logs/nohup-follow-3.log
-#  else
-#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_3.py --log_type="follow-3" --crawler="gongzhonghao" --env="prod"  gongzhonghao/logs/nohup-follow-3.log
-#  fi
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-#else
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 公众号爬虫策略81-121个账号 进程状态正常" >> ${log_path}
-#fi
 
 # 小年糕定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 小年糕定向爬虫策略 进程状态" >> ${log_path}
@@ -119,9 +105,9 @@ ps -ef | grep "run_kuaishou_follow.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" xiaoniangao/nohup-play.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" kuaishou/logs/nohup-follow.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/follow.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/logs/nohup-follow.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
@@ -134,41 +120,39 @@ ps -ef | grep "run_kuaishou_recommend.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" xiaoniangao/nohup-play.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" kuaishou/logs/nohup-recommend.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_recommend.py --log_type="recommend" --crawler="kuaishou" --strategy="推荐爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/recommend.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_recommend.py --log_type="recommend" --crawler="kuaishou" --strategy="推荐爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/logs/nohup-recommend.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 快手推荐爬虫策略 进程状态正常" >> ${log_path}
 fi
 
-
 # 抖音推荐爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 抖音推荐爬虫策略 进程状态" >> ${log_path}
 ps -ef | grep "run_douyin_recommend.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" xiaoniangao/nohup-play.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" douyin/logs/nohup-recommend.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./douyin/douyin_main/run_douyin_recommend.py --log_type="recommend" --crawler="douyin" --strategy="抖音推荐爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" douyin/recommend.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./douyin/douyin_main/run_douyin_recommend.py --log_type="recommend" --crawler="douyin" --strategy="抖音推荐爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" douyin/logs/nohup-recommend.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 抖音推荐爬虫策略 进程状态正常" >> ${log_path}
 fi
 
-##
 # 抖音定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 抖音定向爬虫策略 进程状态" >> ${log_path}
 ps -ef | grep "run_douyin_follow.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/douyin_main/run_douyin_follow.py --log_type="author" --crawler="douyin" --env="dev" xiaoniangao/nohup-play.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/douyin_main/run_douyin_follow.py --log_type="author" --crawler="douyin" --env="dev" douyin/logs/nohup-follow.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./douyin/douyin_main/run_douyin_follow.py --log_type="author" --crawler="douyin" --strategy="抖音定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" douyin/author.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./douyin/douyin_main/run_douyin_follow.py --log_type="author" --crawler="douyin" --strategy="抖音定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" douyin/logs/nohup-author.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
@@ -181,9 +165,9 @@ ps -ef | grep "run_xigua_follow.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --env="dev" xigua/follow.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --env="dev" xigua/logs/nohup-follow.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/follow.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="author" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/logs/nohup-follow.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
@@ -205,6 +189,24 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜推荐榜爬虫策略 进程状态正常" >> ${log_path}
 fi
 
+# 西瓜搜索爬虫策略
+if [[ "$time" > "00:00:00" ]] && [[ "$time" < "20:10:00" ]]; then
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜搜索爬虫策略 进程状态" >> ${log_path}
+  ps -ef | grep "run_xigua_search_new" | grep -v "grep"
+  if [ "$?" -eq 1 ];then
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜搜索爬虫策略, 异常停止, 正在重启!" >> ${log_path}
+    if [ ${env} = "dev" ];then
+      cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_search_new.py --log_type="search" --crawler="xigua" --env="dev" xigua/logs/nohup-search.log
+    else
+      cd ${piaoquan_crawler_dir} && /usr/bin/sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_search_new.py --log_type="search" --crawler="xigua" --env="prod" xigua/logs/nohup-search.log
+    fi
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+  else
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜搜索爬虫策略 进程状态正常" >> ${log_path}
+  fi
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 不在任务启动时间范围: 西瓜搜索爬虫" >> ${log_path}
+fi
 
 # youtube定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 youtube定向爬虫策略 进程状态" >> ${log_path}
@@ -212,16 +214,15 @@ ps -ef | grep "run_youtube_follow.py" | grep -v "grep"
 if [ "$?" -eq 1 ];then
   echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
   if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="author" --crawler="youtube" --env="dev" youtube/follow.log
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="author" --crawler="youtube" --env="dev" youtube/logs/nohup-follow.log
   else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="author" --crawler="youtube" --strategy="定向爬虫策略" --oss_endpoint="hk" --env="hk" --machine="aliyun_hk" youtube/follow.log
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="author" --crawler="youtube" --strategy="定向爬虫策略" --oss_endpoint="hk" --env="hk" --machine="aliyun_hk" youtube/logs/nohup-follow.log
   fi
   echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
 else
   echo "$(date "+%Y-%m-%d %H:%M:%S") youtube定向爬虫策略 进程状态正常" >> ${log_path}
 fi
 
-
 # 本山祝福小程序爬虫
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 本山祝福小程序爬虫 进程状态" >> ${log_path}
 ps -ef | grep "run_benshanzhufu" | grep -v "grep"
@@ -252,7 +253,7 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 岁岁年年迎福气小程序爬虫 进程状态正常" >> ${log_path}
 fi
 
-# 微信指数 bot
+# 微信指数监控
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 微信指数 bot 爬虫 进程状态" >> ${log_path}
 ps -ef | grep "run_weixinzhishu_bot" | grep -v "grep"
 if [ "$?" -eq 1 ];then

requirements.txt  +6 -5

@@ -1,12 +1,13 @@
+Appium_Python_Client==2.10.0
+atomac==1.2.0
 ffmpeg==1.4
 loguru==0.6.0
+lxml==4.9.1
 oss2==2.15.0
 psutil==5.9.2
+PyExecJS==1.5.1
 PyMySQL==1.0.2
+redis==4.5.1
 requests==2.27.1
-selenium~=4.2.0
+selenium==4.9.1
 urllib3==1.26.9
-Appium-Python-Client~=2.8.1
-atomac~=1.2.0
-lxml~=4.9.1
-redis~=4.5.1
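
Note that the loose `~=` specifiers are replaced with exact `==` pins (selenium and Appium-Python-Client are also bumped, and PyExecJS is added), so every environment resolves the same dependency versions. They install the usual way:

```commandline
pip3 install -r requirements.txt
```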

xigua/.DS_Store  BIN


xigua/xigua_follow/xigua_follow.py  +8 -8

@@ -914,17 +914,17 @@ class Follow:
             Common.video_compose(log_type=log_type, crawler=crawler,
                                  video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
             md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+            try:
+                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+            except FileNotFoundError:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
                 return
-            # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-            # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
-            #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
-            #     # 删除视频文件夹
-            #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            #     return
             # 下载封面
             Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                    title=video_dict['video_title'], url=video_dict['cover_url'])

xigua/xigua_main/run_xigua_search_new.py  +28 -0

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/12
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from xigua.xigua_search.xigua_search_new import XiguaSearchNew
+
+
+def main(log_type, crawler, env):
+    Common.logger(log_type, crawler).info('开始抓取 西瓜视频 搜索策略\n')
+    XiguaSearchNew.get_search_videos(log_type, crawler, env)
+    os.system("ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9")
+    os.system("ps aux | grep chromedriver | grep -v grep | awk '{print $2}' | xargs kill -9")
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    # print(args)
+    main(log_type=args.log_type, crawler=args.crawler, env=args.env)
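
The entry point above is launched through main/scheduling_main.sh, as documented in the README; for local debugging it can presumably also be called directly with the same arguments (the direct call is an assumption, not shown in this commit):

```commandline
# via the scheduling wrapper (from the README)
sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_search_new.py --log_type="search" --crawler="xigua" --env="dev" xigua/logs/search-shell.log
# direct call for local debugging (assumed, not part of this commit)
python3 ./xigua/xigua_main/run_xigua_search_new.py --log_type="search" --crawler="xigua" --env="dev"
```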

xigua/xigua_search/test.py  +22 -8

@@ -1,13 +1,27 @@
 import requests
 
-url = "https://www.ixigua.com/api/searchv2/complex/%E8%80%81%E4%BA%BA%E8%A1%A5%E8%B4%B4/10?order_type=publish_time&click_position=new"
+class Test:
+  @classmethod
+  def test_search(cls):
+    url = "https://www.ixigua.com/search/美国禁令/?logTag=423ac644324e5c15d0b4&tab_name=home"
+    headers = {
+      "cookie": "MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f|1680867422|3024002|Fri,+12-May-2023+11:37:04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; __ac_signature=_02B4Z6wo00f01cFG6wAAAIDAuYgABqMKchHBZu-AABRtEnCTGzn5TJAsKOsuT7sRkpwCdN8j7eYG90xzDd55F2cCSZ0PajfVHvgm.7NmCht3MpN9fpw444-hLirWhH5NEyo.T3R-WhtUn32C58; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=87i80MjOSI6anYlKjNm3I80Az2HQvlC28pTxBpTksNBJ.srUyoC9hHhDYrE8N6fE4b26; ttwid=1|HHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY|1683773681|d51c586327656b27492c8f406dd2530c2b4d03c38c4010b2b9d3de5dc883998f; msToken=lewvwOnFOl5Z5z_VkMYd7d4N-5y5uY0j82_1tnhWnOav09INStsHQnr0U953YQ9LzowXSPNP7m6l0nv1faSF9VEsEHGWqTg47kXuZKu9L4brbN4pmDNqZMwZ-YVQWFs=; ixigua-a-s=1",
+      "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
+    }
+    response = requests.get(url=url, headers=headers)
+    print(response)
+    print(response.text)
 
-payload={}
-headers = {
-  'cookie': 'ttwid=1%7Cx_4RDmVTqp6BQ5Xy5AnuCZCQdDyDxv-fnMVWzj19VU0%7C1679382377%7C4e25692dc4b9d5dca56d690001d168b21ed028a9ac075808ab9262238cb405ee; ixigua-a-s=1',
-  'referer': 'https://www.ixigua.com/search/%E8%80%81%E4%BA%BA%E8%A1%A5%E8%B4%B4/?logTag=594535e3690f17a88cdb&tab_name=search'
-}
+  @classmethod
+  def test_dict(cls):
+    dict_1 = {}
+    if dict_1 == {}:
+      print("yes")
+    else:
+      print("no")
 
-response = requests.request("GET", url, headers=headers, data=payload)
 
-print(response.text)
+if __name__ == "__main__":
+  # Test.test_search()
+  Test.test_dict()
+  pass

xigua/xigua_search/xigua_search_new.py  +891 -0

@@ -0,0 +1,891 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/17
+import base64
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.scheduling_db import MysqlHelper
+from common.getuser import getUser
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql
+from common.userAgent import get_random_user_agent
+
+
+class XiguaSearchNew:
+    # 抓取视频数
+    i = 0
+    # 已下载视频数
+    videos_cnt = 0
+    platform = "西瓜视频"
+    tag = "西瓜视频爬虫,搜索爬虫策略"
+
+    @classmethod
+    def get_rule_dict(cls, log_type, crawler):
+        while True:
+            rule_sheet = Feishu.get_values_batch(log_type, crawler, "shxOl7")
+            if rule_sheet is None:
+                Common.logger(log_type, crawler).info(f"get_rule:{rule_sheet},2秒钟后重试")
+                time.sleep(2)
+                continue
+            rule_dict = {
+                "play_cnt": int(rule_sheet[1][2]),
+                "duration_min": int(rule_sheet[2][2]),
+                "duration_max": int(rule_sheet[3][2]),
+                "publish_time": int(rule_sheet[4][2]),
+                "like_cnt": int(rule_sheet[5][2]),
+                "comment_cnt": int(rule_sheet[6][2])
+            }
+            return rule_dict
+
+    # 下载规则
+    @classmethod
+    def download_rule(cls, log_type, crawler, video_dict, rule_dict):
+        Common.logger(log_type, crawler).info(f'play_cnt: {video_dict["play_cnt"]} >= {rule_dict["play_cnt"]}')
+        Common.logger(log_type, crawler).info(f'duration: {rule_dict["duration_max"]} >= {video_dict["duration"]} >= {rule_dict["duration_min"]}')
+        Common.logger(log_type, crawler).info(f'publish_time: {int(time.time())} - {video_dict["publish_time_stamp"]} = {int(time.time())-video_dict["publish_time_stamp"]} <= {rule_dict["publish_time"] * 3600 * 24}')
+        Common.logger(log_type, crawler).info(f'like_cnt: {video_dict["like_cnt"]} >= {rule_dict["like_cnt"]}')
+        Common.logger(log_type, crawler).info(f'comment_cnt: {video_dict["comment_cnt"]} >= {rule_dict["comment_cnt"]}')
+        if video_dict["play_cnt"] >= rule_dict["play_cnt"] \
+            and rule_dict["duration_max"] >= video_dict["duration"] >= rule_dict["duration_min"] \
+            and int(time.time()) - video_dict["publish_time_stamp"] <= rule_dict["publish_time"]*3600*24 \
+            and video_dict["like_cnt"] >= rule_dict["like_cnt"] \
+            and video_dict["comment_cnt"] >= rule_dict["comment_cnt"]:
+            return True
+        else:
+            return False
+
+    # 过滤词库
+    @classmethod
+    def filter_words(cls, log_type, crawler, env):
+        filter_words_list = get_config_from_mysql(log_type, crawler, env, "filter")
+        return filter_words_list
+
+    # 获取用户信息(字典格式). 注意:部分 user_id 字符类型是 int / str
+    @classmethod
+    def get_user_list(cls, log_type, crawler, sheetid, env):
+        try:
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    continue
+                our_user_list = []
+                for i in range(1, len(user_sheet)):
+                    our_uid = user_sheet[i][6]
+                    search_word = user_sheet[i][4]
+                    tag1 = user_sheet[i][8]
+                    tag2 = user_sheet[i][9]
+                    tag3 = user_sheet[i][10]
+                    tag4 = user_sheet[i][11]
+                    tag5 = user_sheet[i][12]
+                    tag6 = user_sheet[i][13]
+                    tag7 = user_sheet[i][14]
+                    Common.logger(log_type, crawler).info(f"正在更新 {search_word} 关键词信息\n")
+                    if our_uid is None:
+                        default_user = getUser.get_default_user()
+                        # 用来创建our_id的信息
+                        user_dict = {
+                            'recommendStatus': -6,
+                            'appRecommendStatus': -6,
+                            'nickName': default_user['nickName'],
+                            'avatarUrl': default_user['avatarUrl'],
+                            'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
+                        }
+                        Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                        our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                        if env == 'prod':
+                            our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                        else:
+                            our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                        Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                             [[our_uid, our_user_link]])
+                        Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                    our_user_dict = {
+                        'out_uid': '',
+                        'search_word': search_word,
+                        'our_uid': our_uid,
+                        'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
+                    }
+                    our_user_list.append(our_user_dict)
+
+                return our_user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_user_id_from_feishu异常:{e}\n')
+
+    @classmethod
+    def videos_cnt_rule(cls, log_type, crawler):
+        while True:
+            videos_cnt_sheet = Feishu.get_values_batch(log_type, crawler, "shxOl7")
+            if videos_cnt_sheet is None:
+                time.sleep(2)
+                continue
+            return int(videos_cnt_sheet[7][2])
+
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
+        video_url_dict = {}
+        # video_url
+        if 'videoResource' not in video_info:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        elif 'dash_120fps' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'backup_url_1']
+                audio_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1][
+                        'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vwidth']
+                video_height = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'dash' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash'] and 'video_4' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'normal' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['normal'] and 'video_4' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_3' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_2' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_1' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['normal'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        else:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        return video_url_dict
+
+    @classmethod
+    def get_comment_cnt(cls, item_id):
+        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
+        params = {
+            "tab_index": "0",
+            "count": "10",
+            "offset": "10",
+            "group_id": str(item_id),
+            "item_id": str(item_id),
+            "aid": "1768",
+            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
+            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
+            "_signature": cls.random_signature(),
+        }
+        headers = {
+            'authority': 'www.ixigua.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+            'cache-control': 'no-cache',
+            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
+            'pragma': 'no-cache',
+            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
+            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
+            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3 重试3次
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
+            return 0
+        return response.json().get("total_number", 0)
+
+    # 获取视频详情
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry each request up to 3 times
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_url_dict = cls.get_video_url(video_info)
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
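+    # Open the search results page in headless Chrome, scroll each result card into view,
+    # extract its item_id from the card link, then fetch details and download/publish.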
+    @classmethod
+    def get_videoList(cls, log_type, crawler, search_word, our_uid, env):
+        # enable Chrome performance logging (so network requests can be inspected)
+        ca = DesiredCapabilities.CHROME.copy()
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # run Chrome headless (no visible browser window)
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--headless")
+        chrome_options.add_argument("--no-sandbox")
+        if env == "dev":
+            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v112/chromedriver"
+        else:
+            chromedriver = "/usr/bin/chromedriver"
+        # initialize the Chrome WebDriver
+        driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        Common.logger(log_type, crawler).info(f"打开搜索页:{search_word}")
+        driver.get(f"https://www.ixigua.com/search/{search_word}/")
+        time.sleep(1)
+
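+        # The search page lazy-loads more cards as we scroll; each pass re-queries the card
+        # list and processes only the cards after `index` (those not handled yet).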
+        index = 0
+        while True:
+            video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
+            video_element_temp = video_elements[index:]
+            if len(video_element_temp) == 0:
+                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                cls.i = 0
+                cls.videos_cnt = 0
+                driver.quit()
+                return
+            for i, video_element in enumerate(video_element_temp):
+                try:
+                    if cls.videos_cnt >= cls.videos_cnt_rule(log_type, crawler):
+                        Common.logger(log_type, crawler).info(f"搜索词: {search_word},已下载视频数: {cls.videos_cnt}\n")
+                        cls.i = 0
+                        cls.videos_cnt = 0
+                        driver.quit()
+                        return
+                    # Common.logger(log_type, crawler).info(f"i:{i}, video_element:{video_element}")
+                    if video_element is None:
+                        Common.logger(log_type, crawler).info('到底啦~\n')
+                        cls.i = 0
+                        cls.videos_cnt = 0
+                        driver.quit()
+                        return
+                    cls.i += 1
+                    Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                    # Common.logger(log_type, crawler).info(f"video_elements:{len(video_elements)}")
+                    # Common.logger(log_type, crawler).info(f"index+i:{index+i}")
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
+                    time.sleep(1)
+                    item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i].get_attribute('href')
+                    item_id = item_id.split("com/")[-1].split("?")[0]
+                    video_dict = cls.get_video_info(log_type, crawler, item_id)
+                    if video_dict is None:
+                        Common.logger(log_type, crawler).info("无效视频")
+                    else:
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        rule_dict = cls.get_rule_dict(log_type, crawler)
+                        if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        elif any(str(word) in video_dict["video_title"] for word in cls.filter_words(log_type, crawler, env)):
+                            Common.logger(log_type, crawler).info("已中过滤词\n")
+                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 search_word=search_word,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 our_uid=our_uid,
+                                                 env=env)
+                except Exception as e:
+                    Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
+
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠3秒\n')
+            time.sleep(3)
+            index = index + len(video_element_temp)
+
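+    # Dedup check: how many rows in crawler_video already contain this out_video_id
+    # (0 means the video has not been downloaded before).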
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, action="")
+        return len(repeat_video)
+
+    # download & publish
+    @classmethod
+    def download_publish(cls, log_type, crawler, search_word, video_dict, rule_dict, our_uid, env):
+
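+        # download the video stream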
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video',
+                               title=video_dict['video_title'], url=video_dict['video_url'])
+        # download the audio track
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio',
+                               title=video_dict['video_title'], url=video_dict['audio_url'])
+        # merge the downloaded audio and video into one file
+        Common.video_compose(log_type=log_type, crawler=crawler,
+                             video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # remove the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                               title=video_dict['video_title'], url=video_dict['cover_url'])
+        # save the video metadata to a local txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+        else:
+            oss_endpoint = "inner"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="搜索爬虫策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == 'dev':
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
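+        # upload failed: clean up the local folder and skip the DB / Feishu writes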
+        if our_video_id is None:
+            try:
+                # remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # write the video record to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {our_uid},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "搜索爬虫策略",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        Common.logger(log_type, crawler).info("视频信息写入数据库完成")
+
+        # write the video record to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "BUNvGC", "ROWS", 1, 2)
+        values = [[
+            search_word,
+            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+            "关键词搜索",
+            video_dict['video_title'],
+            str(video_dict['video_id']),
+            our_video_link,
+            video_dict['gid'],
+            video_dict['play_cnt'],
+            video_dict['comment_cnt'],
+            video_dict['like_cnt'],
+            video_dict['share_cnt'],
+            video_dict['duration'],
+            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+            video_dict['publish_time_str'],
+            video_dict['user_name'],
+            video_dict['user_id'],
+            video_dict['avatar_url'],
+            video_dict['cover_url'],
+            video_dict['video_url'],
+            video_dict['audio_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "BUNvGC", "E2:Z2", values)
+        Common.logger(log_type, crawler).info('视频信息写入飞书完成\n')
+        cls.videos_cnt += 1
+
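+    # Entry point: iterate over the search_word / our_uid pairs configured in the
+    # sheet (sheetid "SSPNPW") and crawl each search word in turn.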
+    @classmethod
+    def get_search_videos(cls, log_type, crawler, env):
+        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="SSPNPW", env=env)
+        for user in user_list:
+            try:
+                cls.i = 0
+                cls.videos_cnt = 0
+                search_word = user["search_word"]
+                our_uid = user["our_uid"]
+                Common.logger(log_type, crawler).info(f"开始抓取 {search_word} 视频\n")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  search_word=search_word,
+                                  our_uid=our_uid,
+                                  env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_search_videos:{e}\n")
+
+
+if __name__ == '__main__':
+    # XiguaSearch.get_search_videos('search', 'xigua', 'dev')
+    # XiguaSearch.get_videoList("search", "xigua", "长寿食物", "dev")
+    # XiguaSearch.get_video_info("search", "xigua", "7027495456829768196")
+    # print(XiguaSearch.get_comment_cnt("7027495456829768196"))
+    # print(XiguaSearch.videos_cnt_rule("search", "xigua"))
+    # XiguaSearch.filter_words('search', 'xigua', 'dev')
+    # print(XiguaSearch.get_rule_dict('search', 'xigua'))
+    # os.system("ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9")
+    pass
+