wangkun 1 year ago
parent
commit
11c592ffde

+ 1 - 0
README.MD

@@ -166,6 +166,7 @@ ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep kanyikan | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep Appium | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep shipinhao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep Python | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
 #### Generate requirements.txt
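The added `grep Python` line is broad: it kills every process whose command line contains "Python", including any crawler that is itself running under Python. A more targeted cleanup is sketched below, assuming the third-party `psutil` package is available and that each crawler's name (e.g. `xigua`) appears in its command line:

```python
import psutil

def kill_crawler(name: str) -> None:
    # Kill only Python processes whose command line mentions the crawler name.
    for proc in psutil.process_iter(["name", "cmdline"]):
        cmdline = " ".join(proc.info["cmdline"] or [])
        if "python" in (proc.info["name"] or "").lower() and name in cmdline:
            proc.kill()  # SIGKILL on POSIX, like kill -9

kill_crawler("xigua")
```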

+ 3 - 4
common/feishu.py

@@ -473,13 +473,12 @@ class Feishu:
             elif crawler == "weixinzhishu" and text == "今日微信指数抓取完毕":
                 content = "微信指数"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k"
-                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "yuzhuoyi")) + "></at> <at id=" + str(
-                    cls.get_userid(log_type, crawler, "rennian")) + "></at>\n"
+                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "yuzhuoyi")) + "></at>\n"
             elif crawler == "weixinzhishu":
                 content = "微信指数"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k"
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
-                    cls.get_userid(log_type, crawler, "rennian")) + "></at>\n"
+                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
 
             elif crawler == "xiaoniangao_hour":
                 content = "小年糕_小时级_已下载表"
@@ -609,7 +608,7 @@ class Feishu:
             elif crawler == "gongzhonghao":
                 content = "公众号_信欣_爬虫表"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?"
-                users = f"\n<at id={str(cls.get_userid(log_type, crawler, 'huxinxue'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'wangxueke'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'rennian'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'xinxin'))}></at>\n"
+                users = f"\n<at id={str(cls.get_userid(log_type, crawler, 'huxinxue'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'wangxueke'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'xinxin'))}></at>\n"
 
             elif crawler == "weiqun":
                 content = "微群爬虫表"
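The `<at>` mention strings above are assembled by hand in every branch. A hypothetical helper (not part of this commit) could build the same markup from a list of names and keep each branch to one line:

```python
@classmethod
def at_users(cls, log_type, crawler, names):
    # Build "\n<at id=...></at> <at id=...></at>\n" for the given user names.
    tags = " ".join(f"<at id={cls.get_userid(log_type, crawler, name)}></at>"
                    for name in names)
    return f"\n{tags}\n"

# e.g. users = cls.at_users(log_type, crawler, ["wangkun", "muxinyi"])
```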

+ 44 - 0
common/log.py

@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/3
+import os
+import logging
+from datetime import time, date
+from logging.handlers import TimedRotatingFileHandler
+
+
+class Log:
+    logger = None
+    handler = None
+
+    @classmethod
+    def logging(cls, log_type, crawler):
+        # Log directory path
+        log_dir = f"./{crawler}/logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # Log file name
+        log_name = f"{date.today()}-{crawler}-{log_type}.log"
+
+        if cls.logger is None:
+            cls.logger = logging.getLogger(__name__)
+            cls.logger.setLevel(logging.INFO)
+
+        if cls.handler is None:
+            cls.handler = TimedRotatingFileHandler(
+                os.path.join(log_path, log_name),
+                when="midnight",
+                interval=1,
+                atTime=time(hour=0, minute=0, second=0),
+                backupCount=20)  # number of rotated log files to keep
+            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s - line %(lineno)d - %(message)s')
+            cls.handler.setFormatter(formatter)
+            cls.logger.addHandler(cls.handler)
+
+        return cls.logger
+
+
+if __name__ == "__main__":
+    Log.logging("search", "dev").info("test")
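A usage sketch for the new logger. Note that `logger` and `handler` are cached on the class, so the `date.today()` in the file name is fixed by the first call; rollover on later days comes from `TimedRotatingFileHandler`'s midnight rotation (which appends a date suffix) rather than a fresh file name:

```python
from common.log import Log

logger = Log.logging("search", "dev")  # first call creates ./dev/logs/ and the handler
logger.info("crawl started")
Log.logging("search", "dev").info("later calls reuse the same handler")
```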

+ 26 - 0
common/public.py

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/3/27
+import requests
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_exception import MQExceptionBase
 import os, sys, jieba
@@ -254,6 +255,31 @@ def get_title_score(log_type, crawler, stop_sheet, score_sheet, title):
     return title_score
 
 
+def task_unbind(log_type, crawler, taskid, uids, env):
+    if env == "dev":
+        url = "https://testadmin.piaoquantv.com/manager/crawler/v3/task/unbind"
+    else:
+        url = "https://admin.piaoquantv.com/manager/crawler/v3/task/unbind"
+
+    params = {
+        "taskId": taskid,  # 任务 ID
+        "uids": uids,  # 解绑用户uid(多个英文逗号隔开),例如"3222121,213231"
+        "operator": ""  # 默认 system
+    }
+    Common.logger(log_type, crawler).info(f"url:{url}")
+    Common.logging(log_type, crawler, env, f"url:{url}")
+    Common.logger(log_type, crawler).info(f"params:{params}")
+    Common.logging(log_type, crawler, env, f"params:{params}")
+    response = requests.post(url=url, json=params)
+    Common.logger(log_type, crawler).info(f"task_unbind_response:{response.text}")
+    Common.logging(log_type, crawler, env, f"task_unbind_response:{response.text}")
+    if response.status_code == 200 and response.json()["code"] == 0:
+        return "success"
+    else:
+        return response.text
+
+
 if __name__ == "__main__":
     print(get_title_score("recommend", "kuaishou", "16QspO", "0usaDk", '像梦一场'))
     pass
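A hedged example of calling the new helper; the task id and uids below are made up for illustration:

```python
# Hypothetical values, for illustration only.
result = task_unbind("author", "gongzhonghao", taskid=1234,
                     uids="3222121,213231", env="prod")
if result != "success":
    print(f"unbind failed: {result}")
```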

+ 3 - 0
dev/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/4

+ 3 - 0
dev/dev_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/4

+ 30 - 0
dev/dev_main/run_dev.py

@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/13
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.log import Log
+from common.common import Common
+from dev.dev_script.xigua_search_publish_time import XiguasearchScheduling
+
+
+def xigua_search_main(log_type, crawler, env):
+    while True:
+        Log.logging(log_type, crawler).info("开始抓取西瓜搜索")
+        XiguasearchScheduling.get_search_videos(log_type=log_type,
+                                                crawler=crawler,
+                                                rule_dict={"play_cnt":{"min":8000,"max":0},"duration":{"min":60,"max":600},"period":{"min":365,"max":365},"videos_cnt":{"min":30,"max":0}},
+                                                user_list=[{"uid": 6267140, "source": "xigua", "link": "健康", "nick_name": "健康", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"},
+                                                           {"uid": 6267140, "source": "xigua", "link": "瓦格纳", "nick_name": "瓦格纳", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"},
+                                                           {"uid": 6267141, "source": "xigua", "link": "高考分数线", "nick_name": "高考分数线", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"}],
+                                                env=env)
+        Common.del_logs(log_type, crawler)
+        Log.logging(log_type, crawler).info("抓取一轮结束\n")
+        Log.logging(log_type, crawler).info("休眠 1 小时")
+        time.sleep(3600)
+
+
+if __name__ == "__main__":
+    xigua_search_main("search", "dev", "dev")
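The `rule_dict` passed above encodes min/max bounds per field; judging from its usage, `max: 0` appears to mean "no upper bound". A minimal sketch of that interpretation (the authoritative check is `common.public.download_rule`, which may differ in detail):

```python
def in_range(value: int, rule: dict) -> bool:
    # A rule like {"min": 8000, "max": 0} means "at least 8000, no upper bound".
    lo, hi = rule.get("min", 0), rule.get("max", 0)
    return value >= lo and (hi == 0 or value <= hi)

assert in_range(12000, {"min": 8000, "max": 0})
assert not in_range(500, {"min": 8000, "max": 0})
```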

+ 3 - 0
dev/dev_script/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/4

+ 828 - 0
dev/dev_script/xigua_search_publish_time.py

@@ -0,0 +1,828 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/26
+import base64
+import json
+import os
+import random
+import shutil
+import string
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.log import Log
+# from common.mq import MQ
+from common.scheduling_db import MysqlHelper
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql, download_rule
+from common.userAgent import get_random_user_agent
+
+
+class XiguasearchScheduling:
+    # Count of videos downloaded in the current run
+    download_cnt = 0
+    platform = "西瓜视频"
+
+    @classmethod
+    def random_signature(cls):
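+        # Generate a random placeholder for the `_signature` request
+        # parameter; the endpoint appears to validate only its shape.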
+        src_digits = string.digits  # digits
+        src_uppercase = string.ascii_uppercase  # uppercase letters
+        src_lowercase = string.ascii_lowercase  # lowercase letters
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
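+        # Walk videoResource in preference order (dash_120fps, dash, normal;
+        # video_4 down to video_1, then dynamic_video) and return the first
+        # playable video/audio URL pair plus its pixel dimensions.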
+        video_url_dict = {}
+        # video_url
+        if 'videoResource' not in video_info:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        elif 'dash_120fps' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash_120fps'] and 'video_4' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_3' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_2' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash_120fps'] and 'video_1' in \
+                    video_info['videoResource']['dash_120fps']['video_list']:
+                video_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash_120fps']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash_120fps']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash_120fps'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash_120fps']['dynamic_video'] \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(
+                video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'backup_url_1']
+                audio_url = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_audio_list'][-1][
+                        'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vwidth']
+                video_height = \
+                    video_info['videoResource']['dash_120fps']['dynamic_video']['dynamic_video_list'][-1][
+                        'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'dash' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['dash'] and 'video_4' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_3' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_2' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['dash'] and 'video_1' in \
+                    video_info['videoResource']['dash']['video_list']:
+                video_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['dash']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['dash']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['dash'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['dash']['dynamic_video'] \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        elif 'normal' in video_info['videoResource']:
+            if "video_list" in video_info['videoResource']['normal'] and 'video_4' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_4']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_4']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_4']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_3' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_3']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_3']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_3']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_2' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_2']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_2']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_2']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            elif "video_list" in video_info['videoResource']['normal'] and 'video_1' in \
+                    video_info['videoResource']['normal']['video_list']:
+                video_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                audio_url = video_info['videoResource']['normal']['video_list']['video_1']['backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['video_list']['video_1']['vwidth']
+                video_height = video_info['videoResource']['normal']['video_list']['video_1']['vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+
+            elif 'dynamic_video' in video_info['videoResource']['normal'] \
+                    and 'dynamic_video_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and 'dynamic_audio_list' in video_info['videoResource']['normal']['dynamic_video'] \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list']) != 0 \
+                    and len(video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list']) != 0:
+
+                video_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'backup_url_1']
+                audio_url = video_info['videoResource']['normal']['dynamic_video']['dynamic_audio_list'][-1][
+                    'backup_url_1']
+                if len(video_url) % 3 == 1:
+                    video_url += '=='
+                elif len(video_url) % 3 == 2:
+                    video_url += '='
+                elif len(audio_url) % 3 == 1:
+                    audio_url += '=='
+                elif len(audio_url) % 3 == 2:
+                    audio_url += '='
+                video_url = base64.b64decode(video_url).decode('utf8')
+                audio_url = base64.b64decode(audio_url).decode('utf8')
+                video_width = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vwidth']
+                video_height = video_info['videoResource']['normal']['dynamic_video']['dynamic_video_list'][-1][
+                    'vheight']
+                video_url_dict["video_url"] = video_url
+                video_url_dict["audio_url"] = audio_url
+                video_url_dict["video_width"] = video_width
+                video_url_dict["video_height"] = video_height
+            else:
+                video_url_dict["video_url"] = ''
+                video_url_dict["audio_url"] = ''
+                video_url_dict["video_width"] = 0
+                video_url_dict["video_height"] = 0
+
+        else:
+            video_url_dict["video_url"] = ''
+            video_url_dict["audio_url"] = ''
+            video_url_dict["video_width"] = 0
+            video_url_dict["video_height"] = 0
+
+        return video_url_dict
+
+    @classmethod
+    def get_comment_cnt(cls, item_id):
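+        # Fetch the comment tab for one article and return its total_number,
+        # or 0 if the request fails or the field is missing.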
+        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
+        params = {
+            "tab_index": "0",
+            "count": "10",
+            "offset": "10",
+            "group_id": str(item_id),
+            "item_id": str(item_id),
+            "aid": "1768",
+            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
+            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
+            "_signature": cls.random_signature(),
+        }
+        headers = {
+            'authority': 'www.ixigua.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+            'cache-control': 'no-cache',
+            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
+            'pragma': 'no-cache',
+            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
+            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
+            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry each request up to 3 times
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
+            return 0
+        return response.json().get("total_number", 0)
+
+    # Fetch video details
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
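+        # Query the mixVideo/information endpoint and normalize the payload
+        # into the video_dict shape used by the rest of the pipeline.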
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry each request up to 3 times
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Log.logging(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, action="")
+        return len(repeat_video)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, title_score, env):
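+        # Download the video and audio streams, mux them, upload the result,
+        # then record it in MySQL and append a row to the Feishu sheet.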
+
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video',
+                               title=video_dict['video_title'], url=video_dict['video_url'])
+        # Download the audio stream
+        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio',
+                               title=video_dict['video_title'], url=video_dict['audio_url'])
+        # Mux audio and video into one file
+        Common.video_compose(log_type=log_type, crawler=crawler,
+                             video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Log.logging(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # Remove the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Log.logging(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                               title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save video metadata to txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload the video
+        Log.logging(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="搜索抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="搜索抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
+        if our_video_id is None:
+            try:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Persist video metadata to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {user_dict["uid"]},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "搜索爬虫策略",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+        Log.logging(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        cls.download_cnt += 1
+        Log.logging(log_type, crawler).info("视频信息写入数据库完成")
+
+        # Write video metadata to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "BUNvGC", "ROWS", 1, 2)
+        values = [[title_score,
+            user_dict["link"],
+            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+            "关键词搜索",
+            video_dict['video_title'],
+            str(video_dict['video_id']),
+            our_video_link,
+            video_dict['gid'],
+            video_dict['play_cnt'],
+            video_dict['comment_cnt'],
+            video_dict['like_cnt'],
+            video_dict['share_cnt'],
+            video_dict['duration'],
+            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+            video_dict['publish_time_str'],
+            video_dict['user_name'],
+            video_dict['user_id'],
+            video_dict['avatar_url'],
+            video_dict['cover_url'],
+            video_dict['video_url'],
+            video_dict['audio_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "BUNvGC", "D2:Z2", values)
+        Log.logging(log_type, crawler).info('视频信息写入飞书完成\n')
+
+    @classmethod
+    def get_search_videos(cls, log_type, crawler, user_list, rule_dict, env):
+        Log.logging(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
+        for user_dict in user_list:
+            try:
+                cls.download_cnt = 0
+                Log.logging(log_type, crawler).info(f"开始抓取 {user_dict['link']} 视频")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  user_dict=user_dict,
+                                  rule_dict=rule_dict,
+                                  env=env)
+            except Exception as e:
+                Log.logging(log_type, crawler).error(f"抓取{user_dict['link']}视频时异常:{e}\n")
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
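+        # Drive a headless Chrome through the search results page, scrolling
+        # one result card at a time and filtering each video against rule_dict.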
+        # mq = MQ(topic_name="topic_crawler_etl_" + env)
+        # Enable performance logging in the browser capabilities
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # Run headless, without opening a browser window
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--headless")
+        chrome_options.add_argument("--window-size=1920,1080")
+        chrome_options.add_argument("--no-sandbox")
+        if env == "dev":
+            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
+        else:
+            chromedriver = "/usr/bin/chromedriver"
+        # Initialize the driver
+        driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        Log.logging(log_type, crawler).info(f"打开搜索页:{user_dict['link']}")
+        driver.get(f"https://www.ixigua.com/search/{user_dict['link']}/")
+        time.sleep(2)
+        Log.logging(log_type, crawler).info("关闭登录弹框")
+        if len(driver.find_elements(By.XPATH, '//*[@class="xg-notification-close"]')) != 0:
+            driver.find_element(By.XPATH, '//*[@class="xg-notification-close"]').click()
+        # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-关闭登录弹框.png")
+        Log.logging(log_type, crawler).info("展开筛选按钮")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
+        Log.logging(log_type, crawler).info("点击最新排序")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-categories-wrapper"]/*[1]/*[2]/*[1]').click()
+        time.sleep(1)
+        # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-最新排序.png")
+        Log.logging(log_type, crawler).info("收起筛选按钮\n")
+        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
+
+        index = 0
+        num = 0
+        while True:
+            # video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card single"]')
+            video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
+            video_element_temp = video_elements[index:]
+            if len(video_element_temp) == 0:
+                Log.logging(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                driver.quit()
+                return
+            for i, video_element in enumerate(video_element_temp):
+                try:
+                    if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 30)):
+                        Log.logging(log_type, crawler).info(f"搜索词: {user_dict['link']},已下载视频数: {cls.download_cnt}\n")
+                        driver.quit()
+                        return
+                    if video_element is None:
+                        Log.logging(log_type, crawler).info('到底啦~\n')
+                        driver.quit()
+                        return
+                    driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
+                    num += 1
+                    Log.logging(log_type, crawler).info(f'拖动"视频"列表第{num}个至屏幕中间')
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
+                    time.sleep(1)
+                    # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-{num}.png")
+                    title = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i-1].get_attribute('title')
+                    publish_day = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard-accessories-bottomInfo__statistics"]')[index+i-1].text.split('· ')[-1]
+                    Log.logging(log_type, crawler).info(f"标题:{title}")
+                    Log.logging(log_type, crawler).info(f"发布时间:{publish_day}")
+                    if "年" in publish_day:
+                        Log.logging(log_type, crawler).info("发布时间超过 1 年\n")
+                        driver.quit()
+                        return
+
+                    item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i-1].get_attribute('href')
+                    item_id = item_id.split("com/")[-1].split("?&")[0]
+                    video_dict = cls.get_video_info(log_type, crawler, item_id)
+                    if video_dict is None:
+                        Log.logging(log_type, crawler).info("无效视频\n")
+                        continue
+                    for k, v in video_dict.items():
+                        Log.logging(log_type, crawler).info(f"{k}:{v}")
+
+                    # if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
+                    #     Log.logging(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                    #     driver.quit()
+                    #     return
+
+                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Log.logging(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Log.logging(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Log.logging(log_type, crawler).info('视频已下载\n')
+                    else:
+                        # title_score = get_title_score(log_type, "kuaishou", "16QspO", "0usaDk", video_dict["video_title"])
+                        # if title_score <= 0.3:
+                        #     Log.logging(log_type, crawler).info(f"权重分:{title_score}<=0.3\n")
+                        #     continue
+                        # Log.logging(log_type, crawler).info(f"权重分:{title_score}>0.3\n")
+                        # cls.download_publish(log_type=log_type,
+                        #                      crawler=crawler,
+                        #                      user_dict=user_dict,
+                        #                      video_dict=video_dict,
+                        #                      rule_dict=rule_dict,
+                        #                      title_score=title_score,
+                        #                      env=env)
+                        video_dict["out_user_id"] = video_dict["user_id"]
+                        video_dict["platform"] = crawler
+                        video_dict["strategy"] = log_type
+                        video_dict["out_video_id"] = video_dict["video_id"]
+                        video_dict["width"] = video_dict["video_width"]
+                        video_dict["height"] = video_dict["video_height"]
+                        video_dict["crawler_rule"] = json.dumps(rule_dict)
+                        video_dict["user_id"] = user_dict["uid"]
+                        video_dict["publish_time"] = video_dict["publish_time_str"]
+                        video_dict["strategy_type"] = log_type
+                        # mq.send_msg(video_dict)
+                        cls.download_cnt += 1
+                        Log.logging(log_type, crawler).info("已下载视频数+1\n")
+
+                except Exception as e:
+                    Log.logging(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
+
+            Log.logging(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
+            time.sleep(10)
+            index = index + len(video_element_temp)
+
+
+if __name__ == '__main__':
+    pass
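One observation on `get_video_url`: the same pad-then-decode sequence is repeated in every rendition branch, and it pads based on `len % 3`, whereas standard base64 padding is computed modulo 4. A small helper along these lines (an untested sketch, not part of the commit) would collapse the repetition:

```python
import base64

def b64_decode_padded(s: str) -> str:
    # Standard base64 padding: pad the string to a multiple of 4 characters.
    return base64.b64decode(s + "=" * (-len(s) % 4)).decode("utf8")
```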

+ 3 - 0
dev/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/4

+ 32 - 18
gongzhonghao/gongzhonghao_author/gongzhonghao_author.py

@@ -20,7 +20,7 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql, download_rule, title_like
+from common.public import get_config_from_mysql, download_rule, title_like, task_unbind
 
 
 class GongzhonghaoAuthor:
@@ -49,9 +49,9 @@ class GongzhonghaoAuthor:
 
     # 获取用户 fakeid
     @classmethod
-    def get_user_info(cls, log_type, crawler, token_index, wechat_name, env):
-        Common.logger(log_type, crawler).info(f"获取站外用户信息:{wechat_name}")
-        Common.logging(log_type, crawler, env, f"获取站外用户信息:{wechat_name}")
+    def get_user_info(cls, log_type, crawler, task_dict, user_dict, token_index, env):
+        Common.logger(log_type, crawler).info(f"获取站外用户信息:{user_dict['link']}")
+        Common.logging(log_type, crawler, env, f"获取站外用户信息:{user_dict['link']}")
         while True:
             token_dict = cls.get_token(log_type, crawler, token_index, env)
             url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
@@ -77,7 +77,7 @@ class GongzhonghaoAuthor:
                 "action": "search_biz",
                 "begin": "0",
                 "count": "5",
-                "query": str(wechat_name),
+                "query": str(user_dict['link']),
                 "token": token_dict['token'],
                 "lang": "zh_CN",
                 "f": "json",
@@ -100,13 +100,18 @@ class GongzhonghaoAuthor:
                     Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                 time.sleep(60 * 15)
                 continue
-            if "list" not in r.json() or len(r.json()["list"]) == 0:
+            if r.json()["base_resp"]["err_msg"] == "ok" and len(r.json()["list"]) == 0:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 15)
-                continue
+                unbind_msg = task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]), env=env)
+                if unbind_msg == "success":
+                    if 20 >= datetime.datetime.now().hour >= 10:
+                        Feishu.bot(log_type, crawler, f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
+                    Common.logging(log_type, crawler, env, f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
+                else:
+                    Common.logger(log_type, crawler).warning(f"unbind_msg:{unbind_msg}")
+                    Common.logging(log_type, crawler, env, f"unbind_msg:{unbind_msg}")
+                return None
             user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
                               'user_id': r.json()["list"][0]["fakeid"],
                               'avatar_url': r.json()["list"][0]["round_head_img"]}
@@ -162,9 +167,16 @@ class GongzhonghaoAuthor:
 
     # 获取文章列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, token_index, rule_dict, user_dict, env):
+    def get_videoList(cls, log_type, crawler, task_dict, token_index, rule_dict, user_dict, env):
         mq = MQ(topic_name="topic_crawler_etl_" + env)
-        user_info_dict = cls.get_user_info(log_type, crawler, token_index,user_dict["link"], env)
+        user_info_dict = cls.get_user_info(log_type=log_type,
+                                           crawler=crawler,
+                                           task_dict=task_dict,
+                                           user_dict=user_dict,
+                                           token_index=token_index,
+                                           env=env)
+        if user_info_dict is None:
+            return
         user_dict["user_id"] = user_info_dict["user_id"]
         user_dict["user_name"] = user_info_dict["user_name"]
         user_dict["avatar_url"] = user_info_dict["avatar_url"]
@@ -222,8 +234,9 @@ class GongzhonghaoAuthor:
             if r.json()["base_resp"]["err_msg"] == "invalid args" and r.json()["base_resp"]["ret"] == 200002:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
+                task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]), env=env)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler,f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 请检查该公众号\n")
+                    Feishu.bot(log_type, crawler,f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
                 return
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
@@ -452,13 +465,14 @@ class GongzhonghaoAuthor:
         Common.logging(log_type, crawler, env, '视频下载/上传成功\n')
 
     @classmethod
-    def get_all_videos(cls, log_type, crawler, token_index, rule_dict, user_list, env):
+    def get_all_videos(cls, log_type, crawler, task_dict, token_index, rule_dict, user_list, env):
         for user_dict in user_list:
-            Common.logger(log_type, crawler).info(f'获取:{user_dict["nick_name"]} 公众号视频\n')
-            Common.logging(log_type, crawler, env, f'获取:{user_dict["nick_name"]} 公众号视频\n')
+            Common.logger(log_type, crawler).info(f'抓取公众号:{user_dict["nick_name"]}\n')
+            Common.logging(log_type, crawler, env, f'抓取公众号:{user_dict["nick_name"]}\n')
             try:
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
+                                  task_dict=task_dict,
                                   token_index = token_index,
                                   rule_dict=rule_dict,
                                   user_dict=user_dict,
@@ -467,8 +481,8 @@ class GongzhonghaoAuthor:
                 Common.logging(log_type, crawler, env, '休眠 60 秒\n')
                 time.sleep(60)
             except Exception as e:
-                Common.logger(log_type, crawler).info(f'抓取{user_dict["nick_name"]}公众号时异常:{e}\n')
-                Common.logging(log_type, crawler, env, f'抓取{user_dict["nick_name"]}公众号时异常:{e}\n')
+                Common.logger(log_type, crawler).info(f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n')
+                Common.logging(log_type, crawler, env, f'抓取公众号:{user_dict["nick_name"]}时异常:{e}\n')
 
 
 if __name__ == "__main__":

+ 22 - 19
gongzhonghao/gongzhonghao_main/run_gzh_author.py

@@ -19,15 +19,17 @@ def get_author_videos(log_type, crawler, token_index, task_dict, rule_dict, user
     Common.logger(log_type, crawler).info(f"user_list:{user_list}")
     Common.logging(log_type, crawler, env, f"user_list:{user_list}")
     GongzhonghaoAuthor.get_all_videos(log_type=log_type,
-                                       crawler=crawler,
-                                       token_index = token_index,
-                                       rule_dict=rule_dict,
-                                       user_list = user_list,
-                                       env=env)
+                                      crawler=crawler,
+                                      task_dict=task_dict,
+                                      token_index=token_index,
+                                      rule_dict=rule_dict,
+                                      user_list=user_list,
+                                      env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取一轮结束\n')
     Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
+
 def main(log_type, crawler, topic_name, group_id, env):
     consumer = get_consumer(topic_name, group_id)
     # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起3秒,3秒内如果有消息可以消费则立即返回响应。
@@ -40,9 +42,9 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
     Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
-                                          f'WaitSeconds:{wait_seconds}\n'
-                                          f'TopicName:{topic_name}\n'
-                                          f'MQConsumer:{group_id}')
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
@@ -59,15 +61,15 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
                 Common.logging(log_type, crawler, env, f"Receive\n"
-                                                      f"MessageId:{msg.message_id}\n"
-                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
-                                                      f"MessageTag:{msg.message_tag}\n"
-                                                      f"ConsumedTimes:{msg.consumed_times}\n"
-                                                      f"PublishTime:{msg.publish_time}\n"
-                                                      f"Body:{msg.message_body}\n"
-                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
-                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
-                                                      f"Properties:{msg.properties}")
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -100,7 +102,8 @@ def main(log_type, crawler, topic_name, group_id, env):
                 for i in range(crawler_num):
                     start = i * chunk_size
                     end = min((i + 1) * chunk_size, user_num + 1)
-                    process = Process(target=get_author_videos, args=(f"{log_type}{i+1}", crawler, i+1, task_dict, rule_dict, user_list[start:end], env))
+                    process = Process(target=get_author_videos,
+                                      args=(f"{log_type}{i + 1}", crawler, i + 1, task_dict, rule_dict, user_list[start:end], env))
                     process.start()
                     processes.append(process)
 
@@ -132,4 +135,4 @@ if __name__ == "__main__":
          crawler=args.crawler,
          topic_name=args.topic_name,
          group_id=args.group_id,
-         env=args.env)
+         env=args.env)
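
The chunking in main() is ceil division plus list slicing. A minimal sketch of the same fan-out, with a stub worker in place of get_author_videos (the names and chunk size here are illustrative):

```python
# Sketch of the chunked multi-process fan-out used in main().
# `worker` stands in for get_author_videos; chunk_size is illustrative.
from multiprocessing import Process


def worker(log_type: str, users: list) -> None:
    print(f"{log_type}: crawling {len(users)} accounts")


if __name__ == "__main__":
    user_list = [f"user_{n}" for n in range(250)]
    chunk_size = 100                                # users per process
    crawler_num = -(-len(user_list) // chunk_size)  # ceil division -> 3
    processes = []
    for i in range(crawler_num):
        chunk = user_list[i * chunk_size:(i + 1) * chunk_size]
        p = Process(target=worker, args=(f"author{i + 1}", chunk))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()  # wait for the whole round before polling MQ again
```

Note that Python slicing clamps at the end of the list, so the `min((i + 1) * chunk_size, user_num + 1)` upper bound in the loop above is harmless, but the `+ 1` contributes nothing; plain `user_num` gives the same chunks.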

+ 44 - 47
gongzhonghao/gongzhonghao_main/run_gzh_author_dev.py

@@ -1,19 +1,23 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/7/3
+import os
+import sys
+from multiprocessing import Process
+sys.path.append(os.getcwd())
 from common.common import Common
 from common.scheduling_db import MysqlHelper
-from common.public import task_fun_mq
 from gongzhonghao.gongzhonghao_author.gongzhonghao_author import GongzhonghaoAuthor
 
 
-def get_author_videos(log_type, crawler, token_index, task_dict, rule_dict, user_list, env):
-    Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
-    Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+def get_author_videos(log_type, crawler, task_dict, token_index, rule_dict, user_list, env):
+    Common.logger(log_type, crawler).info('开始抓取:公众号账号\n')
+    Common.logging(log_type, crawler, env, '开始抓取:公众号账号\n')
     Common.logger(log_type, crawler).info(f"user_list:{user_list}")
     Common.logging(log_type, crawler, env, f"user_list:{user_list}")
     GongzhonghaoAuthor.get_all_videos(log_type=log_type,
                                        crawler=crawler,
+                                      task_dict=task_dict,
                                        token_index = token_index,
                                        rule_dict=rule_dict,
                                        user_list = user_list,
@@ -22,47 +26,40 @@ def get_author_videos(log_type, crawler, token_index, task_dict, rule_dict, user
     Common.logger(log_type, crawler).info('抓取一轮结束\n')
     Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
-def main(log_type, crawler, topic_name, group_id, env):
+def main(log_type, crawler, env):
+    task_dict = {'createTime': 1688382816512, 'id': 54, 'interval': 200, 'machine': 'aliyun', 'mode': 'author', 'operator': '王坤', 'rule': {'period': {'min': 1, 'max': 1}, 'duration': {'min': 20, 'max': 2700}}, 'source': 'gongzhonghao', 'spiderName': 'run_gzh_author', 'startTime': 1688456874000, 'status': 0, 'taskName': '公众号账号', 'updateTime': 1688456876643}
 
-            # # 解析 task_dict
-            # task_dict = task_fun_mq(msg.message_body)['task_dict']
-            # Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-            # Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
-            #
-            # # 解析 rule_dict
-            # rule_dict = task_fun_mq(msg.message_body)['rule_dict']
-            # Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-            # Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}\n")
-            #
-            # # 解析 user_list
-            # task_id = task_dict['id']
-            # select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-            # user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-            #
-            # # 计算启动脚本数 crawler_num
-            # user_num = len(user_list)
-            # chunk_size = 100  # 每个进程处理的用户数量
-            # crawler_num = int(user_num // chunk_size)  # 向下取整
-            # if user_num % chunk_size != 0:
-            #     crawler_num += 1
-            # Common.logger(log_type, crawler).info(f"共{user_num}个公众号,需要启动{crawler_num}个脚本任务")
-            # Common.logging(log_type, crawler, env, f"共{user_num}个公众号,需要启动{crawler_num}个脚本任务")
-            #
-            # # 多进程并行抓取
-            # processes = []
-            # for i in range(crawler_num):
-            #     start = i * chunk_size
-            #     end = min((i + 1) * chunk_size, user_num + 1)
-            #     process = Process(target=get_author_videos, args=(f"{log_type}{i+1}", crawler, i+1, task_dict, rule_dict, user_list[start:end], env))
-            #     process.start()
-            #     processes.append(process)
-            #
-            # for process in processes:
-            #     process.join()
-            #
-            #
-            # Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
-            # Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
-            # time.sleep(2)
-            # continue
-            pass
+    # 解析 rule_dict
+    rule_dict = {"period":{"min":1,"max":1},"duration":{"min":20,"max":2700}}
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
+    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}\n")
+
+    # 解析 user_list
+    task_id = 54
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+
+    # 计算启动脚本数 crawler_num
+    user_num = len(user_list)
+    chunk_size = 2  # 每个进程处理的用户数量
+    crawler_num = int(user_num // chunk_size)  # 向下取整
+    if user_num % chunk_size != 0:
+        crawler_num += 1
+    Common.logger(log_type, crawler).info(f"共{user_num}个公众号,需要启动{crawler_num}个脚本任务")
+    Common.logging(log_type, crawler, env, f"共{user_num}个公众号,需要启动{crawler_num}个脚本任务")
+
+    # 多进程并行抓取
+    processes = []
+    for i in range(crawler_num):
+        start = i * chunk_size
+        end = min((i + 1) * chunk_size, user_num)
+        process = Process(target=get_author_videos, args=(f"{log_type}{i+1}", crawler, task_dict, i+1, rule_dict, user_list[start:end], env))
+        process.start()
+        processes.append(process)
+
+    for process in processes:
+        process.join()
+
+
+if __name__ == "__main__":
+    main(log_type="author", crawler="gongzhonghao", env="dev")

+ 2 - 2
main/process_mq.sh

@@ -28,8 +28,8 @@ else
 fi
 
 echo run_${crawler}_${log_type}.py
-echo topic_${crawler}_${log_type}_${env}
-echo GID_${crawler}_${log_type}_${env}
+echo topic:${crawler}_${log_type}_${env}
+echo GID:${crawler}_${log_type}_${env}
 
 time=$(date +%H:%M:%S)
 echo "$(date "+%Y-%m-%d %H:%M:%S") 开始监测爬虫进程状态" >> ${log_path}

+ 3 - 3
xigua/xigua_main/run_xg_search_dev.py

@@ -5,12 +5,12 @@ import os
 import sys
 sys.path.append(os.getcwd())
 from common.common import Common
-from xigua.xigua_search.xigua_search import XiguasearchScheduling
+from xigua.xigua_search.xigua_search_publish_time import XiguasearchScheduling
 
 
 def xigua_search_main(log_type, crawler, env):
-    Common.logger(log_type, crawler).info("开始抓取西瓜搜索\n")
-    Common.logging(log_type, crawler, env, "开始抓取西瓜搜索\n")
+    Common.logger(log_type, crawler).info("开始抓取西瓜搜索")
+    Common.logging(log_type, crawler, env, "开始抓取西瓜搜索")
     XiguasearchScheduling.get_search_videos(log_type=log_type,
                                             crawler=crawler,
                                             rule_dict={"play_cnt":{"min":8000,"max":0},"duration":{"min":60,"max":600},"period":{"min":365,"max":365},"videos_cnt":{"min":30,"max":0}},

+ 3 - 3
xigua/xigua_search/xigua_search_publish_time.py

@@ -686,7 +686,7 @@ class XiguasearchScheduling:
                         video_dict["strategy_type"] = log_type
                         mq.send_msg(video_dict)
                         cls.download_cnt += 1
-                        Common.logging(log_type, crawler, env, "已下载视频数+1")
+                        Common.logging(log_type, crawler, env, "已下载视频数+1\n")
 
                 except Exception as e:
                     Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
@@ -840,8 +840,8 @@ class XiguasearchScheduling:
         for user_dict in user_list:
             try:
                 cls.download_cnt = 0
-                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']} 视频\n")
-                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']} 视频\n")
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']} 视频")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']} 视频")
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
                                   user_dict=user_dict,