update xigua_recommend

wangkun 2 years ago
parent
commit
40e3f2e8da

+ 6 - 7
README.MD

@@ -61,17 +61,16 @@ ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
 #### Xigua Video
 ```commandline
 Aliyun 102 server
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
-# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="aliyun" xigua/nohup.log
+Xigua follow: sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
+Xigua recommend: /usr/bin/sh ./main/scheduling_main.sh ./xigua/xigua_main/run_xigua_recommend.py --log_type="recommend" --crawler="xigua" --env="prod" xigua/logs/nohup-recommend.log
 Local machine
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
-# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="local" xigua/nohup.log
-macpro
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="macpro" xigua/nohup.log
-# sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="macpro" xigua/nohup.log
+Xigua follow: sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
+Xigua recommend: sh ./main/scheduling_main.sh ./xigua/xigua_main/run_xigua_recommend.py --log_type="recommend" --crawler="xigua" --env="dev" xigua/logs/nohup-recommend.log
 Kill-process commands:
 ps aux | grep run_xigua
 ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_xigua_follow | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_xigua_recommend | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
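Each kill command above is the same pipeline: list processes, keep lines that mention the runner, drop the grep itself, print the PID column, and send SIGKILL. For illustration, a minimal Python equivalent of that pipeline (a sketch assuming a POSIX `ps`; the repo itself uses the shell form):

```python
import os
import signal
import subprocess

def kill_runner(pattern="run_xigua"):
    # ps aux | grep <pattern> | grep -v grep | awk '{print $2}' | xargs kill -9
    out = subprocess.run(["ps", "aux"], capture_output=True, text=True).stdout
    for line in out.splitlines():
        if pattern in line and "grep" not in line:
            os.kill(int(line.split()[1]), signal.SIGKILL)  # column 2 is the PID
```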
 
 #### Kuaishou

+ 3 - 13
common/publish.py

@@ -192,19 +192,9 @@ class Publish:
             uids_prod_gongzhonghao_follow = [26117675, 26117676, 26117677, 26117678, 26117679, 26117680]
             return random.choice(uids_prod_gongzhonghao_follow)
 
-        elif crawler == 'kanyikan':
-            uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
-                                         20631213, 20631214, 20631215, 20631216, 20631217,
-                                         20631223, 20631224, 20631225, 20631226, 20631227]
-            return random.choice(uids_prod_kanyikan_moment)
-
-        elif crawler == 'ggdc' and env == 'prod' and strategy == 'kanyikan_recommend':
-            uids_ggdc_prod_recommend = [26117661, 26117662, 26117663]
-            return random.choice(uids_ggdc_prod_recommend)
-
-        elif crawler == 'ggdc' and env == 'prod' and strategy == 'follow':
-            uids_ggdc_prod_follow = [26117661, 26117662, 26117663]
-            return random.choice(uids_ggdc_prod_follow)
+        elif crawler == 'xigua' and env == 'prod' and strategy == '推荐榜爬虫策略':
+            uids_prod_xigua_recommend = [50322238]
+            return random.choice(uids_prod_xigua_recommend)
 
         else:
             return our_uid
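The new branch pins Xigua recommend uploads to a single production uid, so `random.choice` over the one-element list always returns 50322238. The same dispatch could be written as a lookup table; a hypothetical refactor sketch (pool keys are assumed from the variable names, not from conditions shown in this hunk):

```python
import random

# (crawler, env, strategy) -> candidate uid pool; a one-element pool, like the
# new xigua entry, makes random.choice deterministic
UID_POOLS = {
    ("gongzhonghao", "prod", "follow"): [26117675, 26117676, 26117677,
                                         26117678, 26117679, 26117680],
    ("xigua", "prod", "推荐榜爬虫策略"): [50322238],
}

def pick_uid(crawler, env, strategy, our_uid):
    pool = UID_POOLS.get((crawler, env, strategy))
    return random.choice(pool) if pool else our_uid  # fall back to caller's uid
```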

+ 15 - 0
main/process.sh

@@ -158,6 +158,21 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜定向爬虫策略 进程状态正常" >> ${log_path}
 fi
 
+# 西瓜推荐榜爬虫策略
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 西瓜推荐榜爬虫策略 进程状态" >> ${log_path}
+ps -ef | grep "run_xigua_recommend.py" | grep -v "grep"
+if [ "$?" -eq 1 ];then
+  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+  if [ ${env} = "dev" ];then
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_recommend.py --log_type="recommend" --crawler="xigua" --env="dev" xigua/logs/nohup-recommend.log
+  else
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh main/scheduling_main.sh ./xigua/xigua_main/run_xigua_recommend.py --log_type="recommend" --crawler="xigua" --env="prod" xigua/logs/nohup-recommend.log
+  fi
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 西瓜推荐榜爬虫策略 进程状态正常" >> ${log_path}
+fi
+
 
 # youtube定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 youtube定向爬虫策略 进程状态" >> ${log_path}

+ 1 - 1
weixinzhishu/weixinzhishu_score/weixinzhishu_score.py

@@ -241,6 +241,6 @@ class Weixinzhishu:
 
 
 if __name__ == "__main__":
-    Weixinzhishu.get_score_test('weixin', 'weixinzhishu', 1 , "根本")
+    Weixinzhishu.get_score_test('weixin', 'weixinzhishu', 1 , "乌克兰")
 
     pass

BIN
xigua/videos/.DS_Store → xigua/logs/.DS_Store


+ 30 - 0
xigua/xigua_main/run_xigua_recommend.py

@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/11
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from xigua.xigua_recommend.xigua_recommend import XiguaRecommend
+
+
+def main(log_type, crawler, env):
+    if env == "dev":
+        oss_endpoint = "out"
+    else:
+        oss_endpoint = "inner"
+    Common.logger(log_type, crawler).info('开始抓取 西瓜视频 推荐榜\n')
+    XiguaRecommend.get_videolist(log_type, crawler, oss_endpoint, env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument, declaring its type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # parse the values passed on the command line
+    main(log_type=args.log_type, crawler=args.crawler, env=args.env)
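Note that main() logs "抓取完一轮,休眠 1 分钟" ("finished one round, sleeping 1 minute") but returns after a single round; the cycling and the pause are evidently left to scheduling_main.sh and the process.sh watchdog. If the runner itself were meant to loop, a minimal sketch with the advertised sleep would be (an assumption, not part of this commit):

```python
import time

def run_forever(log_type, crawler, env):
    while True:
        main(log_type, crawler, env)  # one crawl round, as defined above
        time.sleep(60)  # the one-minute pause the log message promises
```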

+ 0 - 27
xigua/xigua_recommend/demo.py

@@ -1,27 +0,0 @@
-import os
-import sys
-
-sys.path.append(os.getcwd())
-from common.scheduling_db import MysqlHelper
-
-
-class Demo:
-    @classmethod
-    def get_config(cls, log_type, crawler, env):
-        select_sql = f"""select * from crawler_config where source="xigua" """
-        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
-        for content in contents:
-            config = content['config']
-            print(type(config))
-            print(config)
-            print(type(eval(config)))
-            token = eval(config['token'])
-            # emoji = config['emoji']
-            # filter = config['filter']
-            print(f"token:{token}")
-            # print(f"emoji:{emoji}")
-            # print(f"filter:{filter}")
-
-
-if __name__ == "__main__":
-    Demo.get_config("demo", "xiaoniangao", "dev")

+ 78 - 0
xigua/xigua_recommend/insert.py

@@ -0,0 +1,78 @@
+import json
+import os
+import sys
+import time
+from datetime import date, timedelta
+sys.path.append(os.getcwd())
+from common.scheduling_db import MysqlHelper
+from common.feishu import Feishu
+
+
+class Demo:
+    @classmethod
+    def get_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="xigua" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def before_day(cls):
+        publish_time_str_rule = (date.today() + timedelta(days=-30)).strftime("%Y-%m-%d %H:%M:%S")
+        publish_time_stamp_rule = int(time.mktime(time.strptime(publish_time_str_rule, "%Y-%m-%d %H:%M:%S")))
+        print(publish_time_str_rule)
+        print(publish_time_stamp_rule)
+
+    @classmethod
+    def insert_config(cls, log_type, crawler, env):
+        filter_sheet = Feishu.get_values_batch(log_type, crawler, "KGB4Hc")
+        title_sheet = Feishu.get_values_batch(log_type, crawler, "bHSW1p")
+        filter_list = []
+        title_list = []
+        for x in filter_sheet:
+            for y in x:
+                if y is None:
+                    pass
+                else:
+                    filter_list.append(y)
+        for x in title_sheet:
+            for y in x:
+                if y is None:
+                    pass
+                else:
+                    title_list.append(y)
+        str_title = ','.join(title_list)
+        str_filter = ','.join(filter_list)
+        config_dict = {
+            "title": str_title,
+            "filter": str_filter
+        }
+        str_config_dict = str(config_dict)
+        # print(f"config_dict:{config_dict}")
+        # print(f"str_config_dict:{str_config_dict}")
+        insert_sql = f""" insert into crawler_config(title, source, config) values("西瓜视频", "xigua", "{str_config_dict}") """
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+
+
+
+if __name__ == "__main__":
+    # Demo.get_config("demo", "xiaoniangao", "dev")
+    # Demo.before_day()
+    Demo.insert_config("demo", "xigua", "prod")
+    pass
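insert_config interpolates str(config_dict) straight into the INSERT statement, which only holds up while no keyword contains a double quote; the insert_sql in xigua_recommend.py below has the same exposure via video titles. A parameterized sketch of the same write (assumes a PyMySQL-style connection object; the repo's MysqlHelper.update_values signature may differ):

```python
import json

def insert_config_safe(conn, title_list, filter_list):
    # store the config as JSON so quoting and escaping are well-defined
    config = json.dumps({"title": ",".join(title_list),
                         "filter": ",".join(filter_list)}, ensure_ascii=False)
    sql = "insert into crawler_config(title, source, config) values(%s, %s, %s)"
    with conn.cursor() as cursor:  # conn: a PyMySQL-style connection (assumed)
        cursor.execute(sql, ("西瓜视频", "xigua", config))  # driver-side escaping
    conn.commit()
```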

+ 342 - 217
xigua/xigua_recommend/xigua_recommend.py

@@ -5,21 +5,68 @@ import base64
 import json
 import os
 import random
+import shutil
 import string
 import sys
 import time
+from datetime import date, timedelta
+from hashlib import md5
+
 import requests
 import urllib3
 from requests.adapters import HTTPAdapter
 from selenium import webdriver
 from selenium.webdriver import DesiredCapabilities
 from selenium.webdriver.chrome.service import Service
+
 sys.path.append(os.getcwd())
+from common.publish import Publish
 from common.common import Common
 from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
 
 
 class XiguaRecommend:
+    platform = "西瓜视频"
+
+    @classmethod
+    def xigua_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="xigua" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def download_rule(cls, video_dict):
+        publish_time_str_rule = (date.today() + timedelta(days=-30)).strftime("%Y-%m-%d %H:%M:%S")
+        publish_time_stamp_rule = int(time.mktime(time.strptime(publish_time_str_rule, "%Y-%m-%d %H:%M:%S")))
+        if int(video_dict['play_cnt']) >= 10000:
+            if 60*30 >= int(video_dict['duration']) >= 60:
+                if int(video_dict['publish_time_stamp']) >= publish_time_stamp_rule:
+                    return True
+                else:
+                    return False
+            else:
+                return False
+        else:
+            return False
+
     @classmethod
     def random_signature(cls):
         src_digits = string.digits  # string_数字
@@ -47,64 +94,57 @@ class XiguaRecommend:
         return new_password
 
     @classmethod
-    def get_signature(cls, env):
-        # try:
-        # time1 = time.time()
-        # print(f"time1:{time1}")
-        # 打印请求配置
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-
-        # 不打开浏览器运行
-        chrome_options = webdriver.ChromeOptions()
-        chrome_options.add_argument("headless")
-        chrome_options.add_argument(
-            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-        chrome_options.add_argument("--no-sandbox")
-
-        # driver初始化
-        if env == "dev":
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                      service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
-        else:
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
-
+    def get_signature(cls, log_type, crawler, env):
+        try:
+            # 打印请求配置
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
 
-        driver.implicitly_wait(10)
-        driver.get('https://www.ixigua.com/')
-        time.sleep(1)
+            # 不打开浏览器运行
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(
+                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
 
-        # 向上滑动 1000 个像素
-        driver.execute_script('window.scrollBy(0, 2000)')
-        # Common.logger(log_type, crawler).info('刷新页面')
-        driver.refresh()
-        logs = driver.get_log("performance")
-        # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
-        driver.quit()
-        for line in logs:
-            msg = json.loads(line['message'])
-            if 'params' not in msg['message']:
-                pass
-            elif 'documentURL' not in msg['message']['params']:
-                pass
-            elif 'www.ixigua.com' not in msg['message']['params']['documentURL']:
-                pass
-            elif 'url' not in msg['message']['params']['request']:
-                pass
-            elif '_signature' not in msg['message']['params']['request']['url']:
-                pass
+            # driver初始化
+            if env == "dev":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                          service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
             else:
-                url = msg['message']['params']['request']['url']
-                signature = url.split('_signature=')[-1].split('&')[0]
-                # print(f"url:{url}")
-                # print(f"signature:{signature}")
-                time2 = time.time()
-                # print(f"time2:{time2}")
-                # print(f"duration:{time2-time1}")
-                return signature
-
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f'get_signature异常:{e}\n')
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+
+
+            driver.implicitly_wait(10)
+            driver.get('https://www.ixigua.com/')
+            time.sleep(1)
+
+            # 向上滑动 2000 个像素
+            driver.execute_script('window.scrollBy(0, 2000)')
+            # Common.logger(log_type, crawler).info('刷新页面')
+            driver.refresh()
+            logs = driver.get_log("performance")
+            # Common.logger(log_type, crawler).info('已获取logs:{}\n', logs)
+            driver.quit()
+            for line in logs:
+                msg = json.loads(line['message'])
+                if 'params' not in msg['message']:
+                    pass
+                elif 'documentURL' not in msg['message']['params']:
+                    pass
+                elif 'www.ixigua.com' not in msg['message']['params']['documentURL']:
+                    pass
+                elif 'url' not in msg['message']['params']['request']:
+                    pass
+                elif '_signature' not in msg['message']['params']['request']['url']:
+                    pass
+                else:
+                    url = msg['message']['params']['request']['url']
+                    signature = url.split('_signature=')[-1].split('&')[0]
+                    return signature
+
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_signature异常:{e}\n')
 
     # 获取视频详情
     @classmethod
@@ -570,178 +610,263 @@ class XiguaRecommend:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
     @classmethod
-    def get_videolist(cls, log_type, crawler, env):
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="西瓜视频" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videolist(cls, log_type, crawler, oss_endpoint, env):
         while True:
-            try:
-                # signature = f"_{cls.random_signature()}"
-                signature = cls.get_signature(env)
-                if signature is None:
-                    Common.logger(log_type, crawler).warning(f"signature:{signature}")
-                    continue
-                url = "https://www.ixigua.com/api/feedv2/feedById?"
-                params = {
-                    "channelId": "94349543909",
-                    "count": "9",
-                    "maxTime": str(int(time.time())),
-                    "queryCount": "1",
-                    "_signature": signature,
-                    # "_signature": '_02B4Z6wo00001O38UmAAAIDBlTK5ZUm9hMDt7HbAAF9Se5',
-                    # "_signature": '_02B4Z6wo0000158YzJQAAIDC59YnkMoXHRufGMgAAIP97SpOQxVfKP5yN1rB9OQ2Be5sOOQWgCiFaeOyxlnCG4RZUX7NfDmED3tHWe2-vSJ-icJj7GZCBorr2AT2MY.Tm6TzjyGTXhKwp98X5f'
-                    # "maxTime": "1680867875",
-                    # "request_from": "701",
-                    # "offset": "0",
-                    # "referrer:": "https://open.weixin.qq.com/",
-                    # "aid": "1768",
-                    # "msToken": "Tqe-W_gibxblmWtCV1PoAUBjAb9W9lPoz8iX8OK9MS1XfRogNdVXeoxc69AKWSEObCuHssPmeRuJe1IH_G3nmTxrJc4XJMEs5iQ2ea36jFmKCTVkJ-9p-M7gcdQz3fw=",
-                    # "X-Bogus": "DFSzswVuZ6UAN9WvtV34uY/F6qyN",
-                }
-                headers = {
-                    'referer': 'https://www.ixigua.com/?is_new_connect=0&is_new_user=0',
-                    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.54',
-                    # 'authority': 'www.ixigua.com',
-                    # 'accept': 'application/json, text/plain, */*',
-                    # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                    # 'cache-control': 'no-cache',
-                    # 'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; s_v_web_id=verify_lef4i99x_32SosrdH_Qrtk_4LJn_8S7q_fhu16xe3s8ZV; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; __ac_nonce=064300065001db7f6a17b; __ac_signature=_02B4Z6wo00f01818fmAAAIDCtbKVZ8QwbVPNXHrAAJd4Fp5IJBrYy-5AgEoa72Xn.rSoHeAReu30RHJAVrhA5vJusD5C-.mKhoov6Xgsg-ppp08LmOqE770Q-TRNhVGRJBKwb1ueF3QyPH2Jca; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; msToken=Tqe-W_gibxblmWtCV1PoAUBjAb9W9lPoz8iX8OK9MS1XfRogNdVXeoxc69AKWSEObCuHssPmeRuJe1IH_G3nmTxrJc4XJMEs5iQ2ea36jFmKCTVkJ-9p-M7gcdQz3fw=; tt_scid=7SO17t4-YtgZpkEX-9CRvB9s98xYEiDf-C10y9i1SxUCRIQFbRgr8N8Hkb5JXjjZ83e7; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1680867977%7C9027097968bd917c32a425e8d5661663df403e6a57a38dff12d4725a783f247c; ixigua-a-s=1; ixigua-a-s=3',
-                    # 'pragma': 'no-cache',
-                    # 'sec-ch-ua': '"Microsoft Edge";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
-                    # 'sec-ch-ua-mobile': '?0',
-                    # 'sec-ch-ua-platform': '"macOS"',
-                    # 'sec-fetch-dest': 'empty',
-                    # 'sec-fetch-mode': 'cors',
-                    # 'sec-fetch-site': 'same-origin',
-                    # 'tt-anti-token': 'r8MhLGUgtoX-95d1758d7d3522be689af62ddc195c1ed6adb1249ca9cb84b39168213da98c63',
-                    # 'x-secsdk-csrf-token': '00010000000182d3d5c3e286e4c4538dd74a7ae03396eabdcc95b454f49a1e6029b52f9046fb1753a48082f54679'
-                }
-                urllib3.disable_warnings()
-                s = requests.session()
-                # max_retries=3 重试3次
-                s.mount('http://', HTTPAdapter(max_retries=3))
-                s.mount('https://', HTTPAdapter(max_retries=3))
-                response = requests.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
-                response.close()
-                if response.status_code != 200:
-                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
-                    return
-                elif 'data' not in response.text:
-                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
-                    return
-                elif 'channelFeed' not in response.json()['data']:
-                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
-                    return
-                elif 'Data' not in response.json()['data']['channelFeed']:
-                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
-                    return
-                elif len(response.json()['data']['channelFeed']['Data']) == 0:
-                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
-                    return
-                else:
-                    videoList = response.json()['data']['channelFeed']['Data']
-                    for i in range(len(videoList)):
-                        if 'data' not in videoList[i]:
-                            continue
-                        # video_title
-                        video_title = videoList[i]['data'].get('title', '')
-                        # video_id
-                        video_id = videoList[i]['data'].get('vid', '')
-                        # play_cnt
-                        play_cnt = int(videoList[i]['data'].get('playNum', 0))
-                        # comment_cnt
-                        comment_cnt = int(videoList[i]['data'].get('commentNum', 0))
-                        # gid
-                        gid = videoList[i]['data'].get('item_id', 0)
-                        # share_cnt / like_cnt
-                        share_cnt = 0
-                        like_cnt = 0
-                        # duration
-                        duration = int(videoList[i]['data'].get('duration', 0))
-                        # publish_time_stamp
-                        publish_time_stamp = int(videoList[i]['data'].get('publish_time', 0))
-                        # publish_time_str
-                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                        # cover_url
-                        cover_url = videoList[i]['data'].get('image_url', '')
-                        # user_name
-                        user_name = videoList[i]['data']['user_info'].get('name', '')
-                        # user_id
-                        user_id = videoList[i]['data']['user_info'].get('user_id', '')
-                        # avatar_url
-                        avatar_url = videoList[i]['data']['user_info'].get('avatar_url', '')
-
-                        if gid == 0 or video_id == '' or cover_url == '':
-                            Common.logger(log_type, crawler).info(f'{video_title}:无效视频\n')
-                        else:
-                            video_url_dict = cls.get_video_url(log_type, crawler, gid)
-                            video_url = video_url_dict["video_url"]
-                            audio_url = video_url_dict["audio_url"]
-                            video_width = video_url_dict["video_width"]
-                            video_height = video_url_dict["video_height"]
-
-                            video_dict = {
-                                'video_title': video_title,
-                                'video_id': video_id,
-                                'gid': gid,
-                                'play_cnt': play_cnt,
-                                'comment_cnt': comment_cnt,
-                                'like_cnt': like_cnt,
-                                'share_cnt': share_cnt,
-                                'video_width': video_width,
-                                'video_height': video_height,
-                                'duration': duration,
-                                'publish_time_stamp': publish_time_stamp,
-                                'publish_time_str': publish_time_str,
-                                'user_name': user_name,
-                                'user_id': user_id,
-                                'avatar_url': avatar_url,
-                                'cover_url': cover_url,
-                                'audio_url': audio_url,
-                                'video_url': video_url,
-                                'session': signature
-                            }
-                            for k, v in video_dict.items():
-                                Common.logger(log_type, crawler).info(f"{k}:{v}")
-                            cls.download_publish(log_type, crawler, video_dict)
-            except Exception as e:
-                Common.logger(log_type, crawler).error(f"get_videolist:{e}\n")
+            signature = cls.get_signature(log_type, crawler, env)
+            if signature is None:
+                Common.logger(log_type, crawler).warning(f"signature:{signature}")
+                continue
+            url = "https://www.ixigua.com/api/feedv2/feedById?"
+            params = {
+                "channelId": "94349543909",
+                "count": "9",
+                "maxTime": str(int(time.time())),
+                "queryCount": "1",
+                "_signature": signature,
+                # "_signature": '_02B4Z6wo00001O38UmAAAIDBlTK5ZUm9hMDt7HbAAF9Se5',
+                # "_signature": '_02B4Z6wo0000158YzJQAAIDC59YnkMoXHRufGMgAAIP97SpOQxVfKP5yN1rB9OQ2Be5sOOQWgCiFaeOyxlnCG4RZUX7NfDmED3tHWe2-vSJ-icJj7GZCBorr2AT2MY.Tm6TzjyGTXhKwp98X5f'
+                # "maxTime": "1680867875",
+                # "request_from": "701",
+                # "offset": "0",
+                # "referrer:": "https://open.weixin.qq.com/",
+                # "aid": "1768",
+                # "msToken": "Tqe-W_gibxblmWtCV1PoAUBjAb9W9lPoz8iX8OK9MS1XfRogNdVXeoxc69AKWSEObCuHssPmeRuJe1IH_G3nmTxrJc4XJMEs5iQ2ea36jFmKCTVkJ-9p-M7gcdQz3fw=",
+                # "X-Bogus": "DFSzswVuZ6UAN9WvtV34uY/F6qyN",
+            }
+            headers = {
+                'referer': 'https://www.ixigua.com/?is_new_connect=0&is_new_user=0',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.54',
+                # 'authority': 'www.ixigua.com',
+                # 'accept': 'application/json, text/plain, */*',
+                # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                # 'cache-control': 'no-cache',
+                # 'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; s_v_web_id=verify_lef4i99x_32SosrdH_Qrtk_4LJn_8S7q_fhu16xe3s8ZV; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; __ac_nonce=064300065001db7f6a17b; __ac_signature=_02B4Z6wo00f01818fmAAAIDCtbKVZ8QwbVPNXHrAAJd4Fp5IJBrYy-5AgEoa72Xn.rSoHeAReu30RHJAVrhA5vJusD5C-.mKhoov6Xgsg-ppp08LmOqE770Q-TRNhVGRJBKwb1ueF3QyPH2Jca; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; msToken=Tqe-W_gibxblmWtCV1PoAUBjAb9W9lPoz8iX8OK9MS1XfRogNdVXeoxc69AKWSEObCuHssPmeRuJe1IH_G3nmTxrJc4XJMEs5iQ2ea36jFmKCTVkJ-9p-M7gcdQz3fw=; tt_scid=7SO17t4-YtgZpkEX-9CRvB9s98xYEiDf-C10y9i1SxUCRIQFbRgr8N8Hkb5JXjjZ83e7; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1680867977%7C9027097968bd917c32a425e8d5661663df403e6a57a38dff12d4725a783f247c; ixigua-a-s=1; ixigua-a-s=3',
+                # 'pragma': 'no-cache',
+                # 'sec-ch-ua': '"Microsoft Edge";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                # 'sec-ch-ua-mobile': '?0',
+                # 'sec-ch-ua-platform': '"macOS"',
+                # 'sec-fetch-dest': 'empty',
+                # 'sec-fetch-mode': 'cors',
+                # 'sec-fetch-site': 'same-origin',
+                # 'tt-anti-token': 'r8MhLGUgtoX-95d1758d7d3522be689af62ddc195c1ed6adb1249ca9cb84b39168213da98c63',
+                # 'x-secsdk-csrf-token': '00010000000182d3d5c3e286e4c4538dd74a7ae03396eabdcc95b454f49a1e6029b52f9046fb1753a48082f54679'
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = requests.get(url=url, headers=headers, params=params, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
+            response.close()
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                return
+            elif 'data' not in response.text:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                return
+            elif 'channelFeed' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
+                return
+            elif 'Data' not in response.json()['data']['channelFeed']:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
+                return
+            elif len(response.json()['data']['channelFeed']['Data']) == 0:
+                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
+                return
+            else:
+                videoList = response.json()['data']['channelFeed']['Data']
+                for i in range(len(videoList)):
+                    if 'data' not in videoList[i]:
+                        continue
+                    # video_title
+                    video_title = videoList[i]['data'].get('title', '')
+                    if video_title == '':
+                        video_title = random.choice(cls.xigua_config(log_type, crawler, "title", env))
+                    # video_id
+                    video_id = videoList[i]['data'].get('vid', '')
+                    # play_cnt
+                    play_cnt = int(videoList[i]['data'].get('playNum', 0))
+                    # comment_cnt
+                    comment_cnt = int(videoList[i]['data'].get('commentNum', 0))
+                    # gid
+                    gid = videoList[i]['data'].get('item_id', 0)
+                    # share_cnt / like_cnt
+                    share_cnt = 0
+                    like_cnt = 0
+                    # duration
+                    duration = int(videoList[i]['data'].get('duration', 0))
+                    # publish_time_stamp
+                    publish_time_stamp = int(videoList[i]['data'].get('publish_time', 0))
+                    # publish_time_str
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # cover_url
+                    cover_url = videoList[i]['data'].get('image_url', '')
+                    # user_name
+                    user_name = videoList[i]['data']['user_info'].get('name', '')
+                    # user_id
+                    user_id = videoList[i]['data']['user_info'].get('user_id', '')
+                    # avatar_url
+                    avatar_url = videoList[i]['data']['user_info'].get('avatar_url', '')
+
+                    video_dict = {
+                        'video_title': video_title,
+                        'video_id': video_id,
+                        'gid': gid,
+                        'play_cnt': play_cnt,
+                        'comment_cnt': comment_cnt,
+                        'like_cnt': like_cnt,
+                        'share_cnt': share_cnt,
+                        'duration': duration,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'user_id': user_id,
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'session': signature
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if gid == 0 or video_id == '' or cover_url == '':
+                        Common.logger(log_type, crawler).info('无效视频\n')
+                    elif cls.download_rule(video_dict) is False:
+                        Common.logger(log_type, crawler).info('不满足抓取规则\n')
+                    elif any(str(word) if str(word) in video_title else False for word in cls.xigua_config(log_type, crawler, "filter", env)) is True:
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        video_url_dict = cls.get_video_url(log_type, crawler, gid)
+                        video_dict['video_url'] = video_url_dict["video_url"]
+                        video_dict["audio_url"] = video_url_dict["audio_url"]
+                        video_dict["video_width"] = video_url_dict["video_width"]
+                        video_dict["video_height"] = video_url_dict["video_height"]
+
+                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
 
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict):
-        if video_dict['video_id'] in [y for x in Feishu.get_values_batch(log_type, crawler, "1iKGF1") for y in x]:
-            Common.logger(log_type, crawler).info("视频已存在\n")
-        elif any(word if word in video_dict['video_title'] else False for word in
-                 cls.filter_words(log_type, crawler)) is True:
-            Common.logger(log_type, crawler).info('标题已中过滤词\n')
+    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'],
+                               url=video_dict['video_url'])
+        # 下载音频
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'],
+                               url=video_dict['audio_url'])
+        # 合成音视频
+        Common.video_compose(log_type=log_type, crawler=crawler,
+                             video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'],
+                               url=video_dict['cover_url'])
+        # 保存视频信息至txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐榜爬虫策略",
+                                                  our_uid="recommend",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == 'dev':
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
         else:
-            Feishu.insert_columns(log_type, crawler, "1iKGF1", "ROWS", 1, 2)
-            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                       "西瓜推荐榜",
-                       video_dict['video_title'],
-                       video_dict['video_id'],
-                       "",
-                       video_dict['gid'],
-                       video_dict['play_cnt'],
-                       video_dict['comment_cnt'],
-                       video_dict['like_cnt'],
-                       video_dict['share_cnt'],
-                       video_dict['duration'],
-                       f"{video_dict['video_width']}*{video_dict['video_height']}",
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['audio_url'],
-                       video_dict['video_url']]]
-            time.sleep(0.5)
-            Feishu.update_values(log_type, crawler, "1iKGF1", "F2:Z2", values)
-            Common.logger(log_type, crawler).info("写入飞书成功\n")
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, 'xigua', "1iKGF1", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_title'],
+                   str(video_dict['video_id']),
+                   our_video_link,
+                   video_dict['gid'],
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['audio_url'],
+                   video_dict['video_url']]]
+        time.sleep(1)
+        Feishu.update_values(log_type, 'xigua', "1iKGF1", "F2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+        rule_dict = {
+            "play_cnt": {"min": 10000},
+            "duration": {"min": 60, "max": 60*30},
+            "publish_day": {"min": 30}
+        }
+
+        # 视频信息保存数据库
+        insert_sql = f""" insert into crawler_video(video_id,
+                                        user_id,
+                                        out_user_id,
+                                        platform,
+                                        strategy,
+                                        out_video_id,
+                                        video_title,
+                                        cover_url,
+                                        video_url,
+                                        duration,
+                                        publish_time,
+                                        play_cnt,
+                                        crawler_rule,
+                                        width,
+                                        height)
+                                        values({our_video_id},
+                                        {int(50322238)},
+                                        "{video_dict['user_id']}",
+                                        "{cls.platform}",
+                                        "推荐榜爬虫策略",
+                                        "{video_dict['video_id']}",
+                                        "{video_dict['video_title']}",
+                                        "{video_dict['cover_url']}",
+                                        "{video_dict['video_url']}",
+                                        {int(video_dict['duration'])},
+                                        "{video_dict['publish_time_str']}",
+                                        {int(video_dict['play_cnt'])},
+                                        '{json.dumps(rule_dict)}',
+                                        {int(video_dict['video_width'])},
+                                        {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
 
 
 
 
 if __name__ == "__main__":
     # XiguaRecommend.get_signature("recommend", "xigua", "dev")
-    XiguaRecommend.get_videolist("recommend", "xigua", "dev")
+    # XiguaRecommend.get_videolist("recommend", "xigua", "dev")
     # print(XiguaRecommend.get_video_url("recommend", "xigua", "7218171653242094139"))
     # print(XiguaRecommend.filter_words("recommend", "xigua"))
+    print(XiguaRecommend.xigua_config("recommend", "xigua", "title", "dev"))
     pass
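A readability note on the filter check in get_videolist: `any(str(word) if str(word) in video_title else False for word in ...) is True` leans on string truthiness to do a plain membership test. An equivalent, more direct form (a sketch; filter words are comma-split strings from xigua_config, and, as in the original, empty words never match):

```python
def hits_filter(video_title, filter_words):
    # True when any non-empty filter word occurs in the title
    return any(word and str(word) in video_title for word in filter_words)
```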