wangkun 1 year ago
parent
commit
065764cd1e

+ 0 - 30
dev/dev_main/run_dev.py

@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/6/13
-import os
-import sys
-import time
-sys.path.append(os.getcwd())
-from common.log import Log
-from common.common import Common
-from dev.dev_script.xigua_search_publish_time import XiguasearchScheduling
-
-
-def xigua_search_main(log_type, crawler, env):
-    while True:
-        Log.logging(log_type, crawler).info("开始抓取西瓜搜索")
-        XiguasearchScheduling.get_search_videos(log_type=log_type,
-                                                crawler=crawler,
-                                                rule_dict={"play_cnt":{"min":8000,"max":0},"duration":{"min":60,"max":600},"period":{"min":365,"max":365},"videos_cnt":{"min":30,"max":0}},
-                                                user_list=[{"uid": 6267140, "source": "xigua", "link": "健康", "nick_name": "健康", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"},
-                                                           {"uid": 6267140, "source": "xigua", "link": "瓦格纳", "nick_name": "瓦格纳", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"},
-                                                           {"uid": 6267141, "source": "xigua", "link": "高考分数线", "nick_name": "高考分数线", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555578819_u=1922778943,2660693611&fm=26&gp=0.jpg", "mode": "search"}],
-                                                env=env)
-        Common.del_logs(log_type, crawler)
-        Log.logging(log_type, crawler).info("抓取一轮结束\n")
-        Log.logging(log_type, crawler).info("休眠 1 小时")
-        time.sleep(3600)
-
-
-if __name__ == "__main__":
-    xigua_search_main("search", "dev", "dev")

+ 0 - 107
dev/dev_script/get_img.py

@@ -1,107 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/9/11
-from urllib.parse import urlencode
-from urllib.request import urlretrieve
-
-import os
-import requests
-import time
-
-
-def getPage(offset):
-    """获取网页信息"""
-    data = {
-        'tn': 'resultjson_com',
-        'ipn': 'rj',
-        'ct': '201326592',
-        'is': '',
-        'fp': 'result',
-        'queryWord': '街拍',
-        'cl': '2',
-        'lm': '-1',
-        'ie': 'utf-8',
-        'oe': 'utf-8',
-        'adpicid': '',
-        'st': '-1',
-        'z': '',
-        'ic': '0',
-        'hd': '',
-        'latest': '',
-        'copyright': '',
-        'word': '街拍',
-        's': '',
-        'se': '',
-        'tab': '',
-        'width': '',
-        'height': '',
-        'face': '0',
-        'istype': '2',
-        'qc': '',
-        'nc': '1',
-        'fr': '',
-        'expermode': '',
-        'force': '',
-        'pn': offset,
-        'rn': '30',
-        'gsm': '1e',
-        '1551789143500': '',
-    }
-    headers = {
-        'Accept': 'text/plain, */*; q=0.01',
-        'Accept-Encoding': 'deflate, br',
-        'Accept-Language': 'zh-CN,zh;q=0.9',
-        'Connection': 'keep-alive',
-        'Cookie': 'BDqhfp=%E8%A1%97%E6%8B%8D%26%260-10-1undefined%26%260%26%261; BIDUPSID=7CA5F033CA22949F5FB6110DBC5DC1EE; BAIDUID=6DDE5BAA44763FD6C7CA84401CB19F36:FG=1; indexPageSugList=%5B%22%E8%A1%97%E6%8B%8D%22%5D; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; uploadTime=1551768107224; userFrom=null; BDRCVFR[X_XKQks0S63]=mk3SLVN4HKm; firstShowTip=1; cleanHistoryStatus=0',
-        'Host': 'image.baidu.com',
-        'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=index&fr=&hs=0&xthttps=111111&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E8%A1%97%E6%8B%8D&oq=%E8%A1%97%E6%8B%8D&rsp=-1',
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6735.400 QQBrowser/10.2.2328.400',
-        'X-Requested-With': 'XMLHttpRequest',
-    }
-    url = 'https://image.baidu.com/search/acjson?' + urlencode(data)
-    try:
-        res = requests.get(url, headers=headers)  # query params are already encoded into the URL
-        res.encoding = 'utf-8'  # response encoding
-        if res.status_code == 200:
-            return res.json()
-    except requests.ConnectionError:
-        return None
-
-
-def getImage(json):
-    """解析网页数据并爬取所需的信息"""
-    try:
-        data = json.get('data')
-        if data:
-            for item in data:
-                yield {
-                    'image': item.get('hoverURL'),
-                    'title': item.get('fromPageTitleEnc'),
-                }
-    except AttributeError:  # json is None when the request failed
-        return None
-
-
-def saveImage(item):
-    """把获取的图片与标题封装并存储"""
-    try:
-        m = item.get('title')
-        local_image = item.get('image')  # 获取图片的url
-        image_url = local_image
-        urlretrieve(image_url, './pic/' + str(m) + '.jpg')
-        # print('p'+str(m) + '.jpg')
-    except:
-        return None
-
-
-def main(offset):
-    """调度爬取函数和存储"""
-    json = getPage(offset)
-    for item in getImage(json):
-        print(item)
-        saveImage(item)
-
-
-if __name__ == '__main__':
-    for i in range(5):  # a fixed five-iteration loop is unreliable: the gsm token in data changes on every request
-        main(offset=i * 30)
-        time.sleep(1)

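The closing comment in get_img.py flags the real pagination problem: Baidu's acjson endpoint issues a fresh gsm token with each response, so a fixed offset loop goes stale. A minimal sketch of one workaround, reusing the functions above and assuming (unverified) that each JSON response carries the gsm token for the next request:

def crawl(pages=5):
    """Paginate by carrying the server-issued gsm token forward."""
    gsm = '1e'  # seed value, as in the original request dict
    for i in range(pages):
        result = getPage(offset=i * 30)  # getPage would need to accept and send gsm (not shown)
        if not result:
            break
        gsm = result.get('gsm', gsm)  # hypothetical field; keep the old token if absent
        for item in getImage(result):
            saveImage(item)
        time.sleep(1)
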
+ 0 - 30
dev/dev_script/mitmproxy_test.py

@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/7/24
-from mitmproxy import ctx
-
-
-class ProxyData:
-    def __init__(self):
-        self.num = 0
-
-    def request(self, flow):
-        self.num = self.num + 1
-        ctx.log.info("We've seen %d flows" % self.num)
-
-
-addons = [
-    ProxyData()
-]
-
-
-if __name__ == "__main__":
-    ProxyData.start_proxy()
-    ProxyData.start_selenium()
-    print("requests_data:", ProxyData.requests_data)
-    print("response_data:", ProxyData.response_data)
-
-    # 分析包含链接 www.douyin.com 的响应数据
-    for response in ProxyData.response_data:
-        if "www.douyin.com" in response['url']:
-            print("Douyin response:", response)

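For the douyin analysis the deleted __main__ block gestured at, the data has to be collected inside a hook while the proxy runs. A minimal sketch of such an addon (DouyinCapture and the file name are illustrative, not part of the original script; run it with `mitmdump -s capture_douyin.py`):

from mitmproxy import ctx, http


class DouyinCapture:
    def __init__(self):
        self.response_data = []  # collected {url, status} records

    def response(self, flow: http.HTTPFlow):
        # Record responses whose URL contains www.douyin.com
        if "www.douyin.com" in flow.request.pretty_url:
            record = {"url": flow.request.pretty_url,
                      "status": flow.response.status_code}
            self.response_data.append(record)
            ctx.log.info(f"Douyin response: {record}")


addons = [DouyinCapture()]
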
+ 0 - 33
dev/dev_script/shipinhao.py

@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/7/21
-import requests
-
-
-class Shipinhao:
-    @classmethod
-    def get_shipinhao(cls):
-        # URL of the WeChat Channels ("Shipinhao") recommendation feed
-        url = 'https://mp.weixin.qq.com/mp/videoplayer?action=get_recommend_video_list&__biz=MzI1OTQxMjE0Nw==&uin=&key=&pass_ticket=&wxtoken=777&devicetype=Windows+10&clientversion=1000&appmsg_token=cc11373ab7db78508003b6d2f46bab1a779666d3&f=json'
-
-        # Send the GET request
-        response = requests.get(url)
-
-        # Parse the JSON response
-        data = response.json()
-        print(f'data: {data}')
-        # Walk the recommended-video list (guard against a missing key)
-        video_list = data.get('recommend_video_list', [])
-        for video in video_list:
-            # Video title
-            title = video['title']
-            # Video URL
-            video_url = video['video_url']
-            # Print title and URL
-            print(f'Title: {title}')
-            print(f'Video URL: {video_url}')
-            print('---')
-
-
-if __name__ == "__main__":
-    Shipinhao.get_shipinhao()

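This endpoint depends on live WeChat session parameters (uin, key, and pass_ticket are blank in the URL above, and appmsg_token expires), so in practice it often answers with an error payload rather than the expected JSON. A defensive fetch, as a sketch (get_recommend_list is a hypothetical helper, not part of the original file):

import requests


def get_recommend_list(url):
    """Return the recommended-video list, or [] on any failure."""
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()   # non-2xx raises HTTPError
        data = response.json()        # raises ValueError if the body is not JSON
    except (requests.RequestException, ValueError) as e:
        print(f'request failed: {e}')
        return []
    return data.get('recommend_video_list', [])
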
+ 17 - 0
dev/dev_script/test.py

@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/9/19
+
+
+class Test:
+
+    @classmethod
+    def list_test(cls):
+        list1 = ["a", "b", "c", 1, 2, 3]
+        list2 = ["a", "d", "e", 4, 2, 3]
+        # Elements of list2 that are not in list1 (set order is arbitrary)
+        list3 = list(set(list2).difference(set(list1)))
+        print(list3)
+
+
+if __name__ == "__main__":
+    Test.list_test()

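set(list2).difference(set(list1)) keeps only the members of list2 missing from list1, and since sets are unordered the printed order varies between runs. A short sketch contrasting this asymmetric difference with the symmetric one, with deterministic output:

list1 = ["a", "b", "c", 1, 2, 3]
list2 = ["a", "d", "e", 4, 2, 3]

only_in_2 = set(list2) - set(list1)   # {"d", "e", 4}: asymmetric difference
in_either = set(list1) ^ set(list2)   # {"b", "c", 1, "d", "e", 4}: symmetric difference
# Sort with key=str so mixed int/str elements compare cleanly
print(sorted(only_in_2, key=str))
print(sorted(in_either, key=str))
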
+ 0 - 248
dev/dev_script/xg_recommend2.py

@@ -1,248 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/7/6
-import os
-import random
-import sys
-import time
-import cv2
-import requests
-import urllib3
-from selenium.webdriver import DesiredCapabilities, ActionChains
-from selenium import webdriver
-from selenium.webdriver.chrome.service import Service
-from selenium.webdriver.common.by import By
-sys.path.append(os.getcwd())
-from common.common import Common
-
-
-class XGRecommend(object):
-
-    def __init__(self, log_type, crawler, env):
-        """
-        Launch Chrome locally with remote debugging on port 12306:
-        open -a "Google Chrome" --args --remote-debugging-port=12306
-        """
-        # Common.logger(log_type, crawler).info("启动 Chrome 浏览器")
-        # cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
-        # os.system(cmd)
-
-        if env == "dev":
-            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
-        else:
-            chromedriver = "/usr/bin/chromedriver"
-
-        # Enable performance logging so network requests can be inspected
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-        # Browser options
-        self.browser = webdriver.ChromeOptions()
-        # Set the user-agent
-        self.browser.add_argument(
-            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-        # Hide the "Chrome is being controlled by automated test software" banner
-        self.browser.add_argument('--disable-infobars')
-
-        # Disable GPU acceleration
-        self.browser.add_argument('--disable-gpu')
-        # Turn off the automation extension
-        self.browser.add_experimental_option("useAutomationExtension", False)
-        # Strip the enable-automation switch
-        self.browser.add_experimental_option('excludeSwitches', ['enable-automation'])
-        # Disable Blink features that expose automation
-        self.browser.add_argument('--disable-blink-features=AutomationControlled')
-        # Run headless (no visible browser window)
-        # self.browser.add_argument("--headless")
-        # Required for headless mode on Linux
-        # self.browser.add_argument("--no-sandbox")
-        # Browser window size
-        self.browser.add_argument("--window-size=1920,1080")
-
-        # Initialize the driver
-        self.driver = webdriver.Chrome(desired_capabilities=ca, options=self.browser, service=Service(chromedriver))
-        self.driver.implicitly_wait(10)
-        Common.logger(log_type, crawler).info("Opening the Xigua recommend page")
-        self.driver.get("https://www.ixigua.com/")
-        self.username = "19831265541"
-        self.password = "Test111111"
-        time.sleep(2)
-
-    def quit(self, log_type, crawler):
-        Common.logger(log_type, crawler).info("退出浏览器")
-        self.driver.quit()
-
-    # Returns two arrays: one accelerating the slider forward, one easing it back
-    @staticmethod
-    def generate_tracks(distance):
-        # Add 20 px so the slider overshoots the gap, then folds back onto it
-        distance += 20
-        v = 0
-        t = 0.2
-        forward_tracks = []
-        current = 0
-        mid = distance * 3 / 5  # deceleration threshold
-        while current < distance:
-            if current < mid:
-                a = 2  # acceleration +2
-            else:
-                a = -3  # acceleration -3
-            s = v * t + 0.5 * a * (t ** 2)
-            v = v + a * t
-            current += s
-            forward_tracks.append(round(s))
-
-        back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1]
-        return forward_tracks, back_tracks
-
-    # Build the list of per-step drag distances
-    @staticmethod
-    def get_tracks(distance):
-        """
-        Mimic a human drag: uniform acceleration, then uniform deceleration.
-        Uniformly accelerated motion:
-            v = v0 + a*t
-            s = v0*t + (1/2)*a*t**2
-        """
-        # Initial velocity
-        v = 0
-        # Time slice
-        t = 0.3
-        # Per-step distances
-        tracks = []
-        # Distance covered so far
-        current = 0
-        # Start decelerating at 4/5 of the distance
-        mid = distance * 4 / 5
-        while current < distance:
-            if current < mid:
-                # Acceleration phase
-                a = 2
-            else:
-                # Deceleration phase
-                a = -3
-            # Velocity at the start of this slice
-            v0 = v
-            # Displacement over this slice
-            s = v0 * t + 0.5 * a * t ** 2
-            # Update velocity
-            v = v0 + a * t
-            # Update covered distance
-            current += s
-            # Record the step
-            tracks.append(round(s))
-        return tracks
-
-    @staticmethod
-    def FindPic(log_type, crawler, target, template):
-        """
-        Locate the best template match inside an image.
-        :param log_type: log type
-        :param crawler: crawler name
-        :param target: the target (background) image
-        :param template: the template image to locate
-        :return: the x coordinate of the best match
-        """
-        target_rgb = cv2.imread(target)
-        target_gray = cv2.cvtColor(target_rgb, cv2.COLOR_BGR2GRAY)
-        template_gray = cv2.imread(template, 0)
-        res = cv2.matchTemplate(target_gray, template_gray, cv2.TM_CCOEFF_NORMED)
-        value = cv2.minMaxLoc(res)
-        Common.logger(log_type, crawler).info(value)
-        # X coordinate of the gap (location of the best match)
-        x_val = int(value[3][0])
-        Common.logger(log_type, crawler).info(f"Gap x coordinate: {x_val}")
-        # Template image dimensions
-        template_height, template_width, template_c = cv2.imread(template).shape
-        Common.logger(log_type, crawler).info(f"Template height: {template_height}")
-        Common.logger(log_type, crawler).info(f"Template width: {template_width}")
-        Common.logger(log_type, crawler).info(f"Template channels: {template_c}")
-        # Distance the slider needs to travel
-        move_val = x_val - template_width
-        Common.logger(log_type, crawler).info(f"Slide distance: {move_val}")
-        return x_val
-
-    def login(self, log_type, crawler, env):
-        Common.logger(log_type, crawler).info("Clicking the login button")
-        self.driver.find_element(By.XPATH, '//*[@class="xg-button xg-button-primary xg-button-middle loginButton"]').click()
-        time.sleep(random.randint(1, 2))
-        Common.logger(log_type, crawler).info("Switching to password login")
-        self.driver.find_element(By.XPATH, '//*[@class="web-login-link-list__item__text"]').click()
-        time.sleep(random.randint(1, 2))
-        Common.logger(log_type, crawler).info("Entering the phone number")
-        self.driver.find_element(By.XPATH, '//*[@class="web-login-normal-input__input"]').send_keys(self.username)
-        time.sleep(random.randint(1, 2))
-        Common.logger(log_type, crawler).info("Entering the password")
-        self.driver.find_element(By.XPATH, '//*[@class="web-login-button-input__input"]').send_keys(self.password)
-        time.sleep(random.randint(1, 2))
-        Common.logger(log_type, crawler).info("Submitting the login form")
-        self.driver.find_element(By.XPATH, '//*[@class="web-login-account-password__button-wrapper"]/*[1]').click()
-        time.sleep(random.randint(1, 2))
-
-        # Locate the captcha slider
-        Common.logger(log_type, crawler).info("Locating the captcha slider")
-        move_btns = self.driver.find_elements(By.XPATH, '//*[@class="sc-kkGfuU bujTgx"]')
-        if len(move_btns) == 0:
-            Common.logger(log_type, crawler).info("No slider found; retrying in 3-5 seconds")
-            self.quit(log_type, crawler)
-            time.sleep(random.randint(3, 5))
-            self.__init__(log_type, crawler, env)
-            self.login(log_type, crawler, env)
-            return
-        move_btn = move_btns[0]
-
-        while True:
-            # Download the slider image with requests
-            slide_url = self.driver.find_element(By.XPATH,
-                                                 '//*[@class="captcha_verify_img_slide react-draggable sc-VigVT ggNWOG"]').get_attribute(
-                "src")
-            slide_dir = f"./{crawler}/photo/img_slide.png"
-            urllib3.disable_warnings()
-            slide_url_response = requests.get(slide_url, verify=False)
-            with open(slide_dir, "wb") as file:
-                file.write(slide_url_response.content)
-
-            # Download the background image with requests
-            bg_image_url = self.driver.find_element(By.XPATH, '//*[@id="captcha-verify-image"]').get_attribute("src")
-            bg_image_dir = f"./{crawler}/photo/img_bg.png"
-            urllib3.disable_warnings()
-            bg_image_url_response = requests.get(bg_image_url, verify=False)
-            with open(bg_image_dir, "wb") as file:
-                file.write(bg_image_url_response.content)
-
-            offset = self.FindPic(log_type, crawler, bg_image_dir, slide_dir)
-            Common.logger(log_type, crawler).info(f"offset:{offset}")
-
-            # Press and hold the slider
-            Common.logger(log_type, crawler).info("Pressing and holding the slider")
-            ActionChains(self.driver).click_and_hold(on_element=move_btn).perform()
-            # Drag most of the way
-            Common.logger(log_type, crawler).info("Dragging the slider 0.5 * distance")
-            ActionChains(self.driver).move_to_element_with_offset(to_element=move_btn, xoffset=int(0.5*offset), yoffset=0).perform()
-            # Drag the remaining pixels
-            Common.logger(log_type, crawler).info("Dragging the remaining pixels")
-            tracks = self.get_tracks(int(0.15*offset))
-            # Walk each step distance
-            for track in tracks:
-                # Move the slider by this step
-                ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=0).perform()
-            # Pause for 1s
-            Common.logger(log_type, crawler).info("Pausing for 1s")
-            time.sleep(1)
-            # Release the slider
-            Common.logger(log_type, crawler).info("Releasing the slider")
-            ActionChains(self.driver).release().perform()
-
-            # Re-check whether the slider is still present; retry if so
-            move_btns = self.driver.find_elements(By.XPATH, '//*[@class="sc-kkGfuU bujTgx"]')
-            if len(move_btns) != 0:
-                time.sleep(1)
-                continue
-            break
-        time.sleep(5)
-        Common.logger(log_type, crawler).info("退出浏览器")
-        self.quit(log_type, crawler)
-
-
-if __name__ == "__main__":
-    recommend = XGRecommend("search", "dev", "dev")
-    recommend.login("search", "dev", "dev")

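Both track generators integrate the uniform-acceleration formula s = v0*t + 0.5*a*t**2 over fixed time slices and round each step, so the steps only approximately cover the requested distance. A quick sanity check of that drift, assuming the XGRecommend class above is importable (the static methods need no browser):

forward, back = XGRecommend.generate_tracks(100)
print(sum(forward), sum(back))           # forward covers ~120 px or a bit more; back eases back -20 px
print(sum(forward) + sum(back))          # net travel: roughly the requested 100 px
print(sum(XGRecommend.get_tracks(100)))  # ~100 px, give or take rounding error
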
+ 0 - 828
dev/dev_script/xigua_search_publish_time.py

@@ -1,828 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/26
-import base64
-import json
-import os
-import random
-import shutil
-import string
-import sys
-import time
-from hashlib import md5
-import requests
-import urllib3
-from requests.adapters import HTTPAdapter
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
-from selenium import webdriver
-from selenium.webdriver.common.by import By
-sys.path.append(os.getcwd())
-from common.log import Log
-# from common.mq import MQ
-from common.scheduling_db import MysqlHelper
-from common.common import Common
-from common.feishu import Feishu
-from common.publish import Publish
-from common.public import get_config_from_mysql, download_rule
-from common.userAgent import get_random_user_agent
-
-
-class XiguasearchScheduling:
-    # Number of videos downloaded so far
-    download_cnt = 0
-    platform = "西瓜视频"
-
-    @classmethod
-    def random_signature(cls):
-        src_digits = string.digits  # digits
-        src_uppercase = string.ascii_uppercase  # uppercase letters
-        src_lowercase = string.ascii_lowercase  # lowercase letters
-        digits_num = random.randint(1, 6)
-        uppercase_num = random.randint(1, 26 - digits_num - 1)
-        lowercase_num = 26 - (digits_num + uppercase_num)
-        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
-            src_lowercase, lowercase_num)
-        random.shuffle(password)
-        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
-        new_password_start = new_password[0:18]
-        new_password_end = new_password[-7:]
-        if new_password[18] == '8':
-            new_password = new_password_start + 'w' + new_password_end
-        elif new_password[18] == '9':
-            new_password = new_password_start + 'x' + new_password_end
-        elif new_password[18] == '-':
-            new_password = new_password_start + 'y' + new_password_end
-        elif new_password[18] == '.':
-            new_password = new_password_start + 'z' + new_password_end
-        else:
-            new_password = new_password_start + 'y' + new_password_end
-        return new_password
-
-    @classmethod
-    def get_video_url(cls, video_info):
-        """Pick the best available video/audio URLs from videoResource.
-        Prefers dash_120fps, then dash, then normal; within the chosen type,
-        the highest available quality (video_4 .. video_1), falling back to
-        the dynamic_video lists. URLs come back base64-encoded with the
-        padding stripped, so it is restored before decoding."""
-        def decode_url(url):
-            # base64 strings must be a multiple of 4 in length: restore '=' padding
-            return base64.b64decode(url + '=' * (-len(url) % 4)).decode('utf8')
-
-        empty = {"video_url": '', "audio_url": '', "video_width": 0, "video_height": 0}
-        resource = video_info.get('videoResource', {})
-        for res_type in ('dash_120fps', 'dash', 'normal'):
-            if res_type not in resource:
-                continue
-            res = resource[res_type]
-            # Fixed-quality renditions, best first
-            video_list = res.get('video_list', {})
-            for quality in ('video_4', 'video_3', 'video_2', 'video_1'):
-                if quality in video_list:
-                    item = video_list[quality]
-                    url = decode_url(item['backup_url_1'])
-                    return {"video_url": url,
-                            "audio_url": url,
-                            "video_width": item['vwidth'],
-                            "video_height": item['vheight']}
-            # Dynamic renditions with separate video and audio streams
-            dynamic = res.get('dynamic_video', {})
-            dynamic_videos = dynamic.get('dynamic_video_list', [])
-            dynamic_audios = dynamic.get('dynamic_audio_list', [])
-            if dynamic_videos and dynamic_audios:
-                return {"video_url": decode_url(dynamic_videos[-1]['backup_url_1']),
-                        "audio_url": decode_url(dynamic_audios[-1]['backup_url_1']),
-                        "video_width": dynamic_videos[-1]['vwidth'],
-                        "video_height": dynamic_videos[-1]['vheight']}
-            # The preferred type exists but holds no usable rendition
-            return dict(empty)
-        return dict(empty)
-
-    @classmethod
-    def get_comment_cnt(cls, item_id):
-        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
-        params = {
-            "tab_index": "0",
-            "count": "10",
-            "offset": "10",
-            "group_id": str(item_id),
-            "item_id": str(item_id),
-            "aid": "1768",
-            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
-            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
-            "_signature": cls.random_signature(),
-        }
-        headers = {
-            'authority': 'www.ixigua.com',
-            'accept': 'application/json, text/plain, */*',
-            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-            'cache-control': 'no-cache',
-            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
-            'pragma': 'no-cache',
-            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
-            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
-            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
-        }
-        urllib3.disable_warnings()
-        s = requests.session()
-        # Retry each request up to 3 times
-        s.mount('http://', HTTPAdapter(max_retries=3))
-        s.mount('https://', HTTPAdapter(max_retries=3))
-        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
-        response.close()
-        if response.status_code != 200 or 'total_number' not in response.json():
-            return 0
-        return response.json().get("total_number", 0)
-
-    # Fetch video details
-    @classmethod
-    def get_video_info(cls, log_type, crawler, item_id):
-        url = 'https://www.ixigua.com/api/mixVideo/information?'
-        headers = {
-            "accept-encoding": "gzip, deflate",
-            "accept-language": "zh-CN,zh-Hans;q=0.9",
-            "user-agent": get_random_user_agent('pc'),
-            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
-        }
-        params = {
-            'mixId': str(item_id),
-            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
-                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
-            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
-            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
-                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
-        }
-        cookies = {
-            'ixigua-a-s': '1',
-            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
-                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
-            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
-                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
-            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
-            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
-            '__ac_nonce': '06304878000964fdad287',
-            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
-                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
-            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
-            '_tea_utm_cache_1300': 'undefined',
-            'support_avif': 'false',
-            'support_webp': 'false',
-            'xiguavideopcwebid': '7134967546256016900',
-            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
-        }
-        urllib3.disable_warnings()
-        s = requests.session()
-        # Retry each request up to 3 times
-        s.mount('http://', HTTPAdapter(max_retries=3))
-        s.mount('https://', HTTPAdapter(max_retries=3))
-        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
-        response.close()
-        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
-            Log.logging(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
-            return None
-        else:
-            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
-            if video_info == {}:
-                return None
-            video_url_dict = cls.get_video_url(video_info)  # resolve URLs once
-            video_dict = {
-                "video_title": video_info.get("title", ""),
-                "video_id": video_info.get("videoResource", {}).get("vid", ""),
-                "gid": str(item_id),
-                "play_cnt": int(video_info.get("video_watch_count", 0)),
-                "like_cnt": int(video_info.get("video_like_count", 0)),
-                "comment_cnt": int(cls.get_comment_cnt(item_id)),
-                "share_cnt": 0,
-                "favorite_cnt": 0,
-                "duration": int(video_info.get("video_duration", 0)),
-                "video_width": int(cls.get_video_url(video_info)["video_width"]),
-                "video_height": int(cls.get_video_url(video_info)["video_height"]),
-                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
-                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_info.get("video_publish_time", 0)))),
-                "user_name": video_info.get("user_info", {}).get("name", ""),
-                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
-                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
-                "cover_url": video_info.get("poster_url", ""),
-                "audio_url": cls.get_video_url(video_info)["audio_url"],
-                "video_url": cls.get_video_url(video_info)["video_url"],
-                "session": f"xigua-search-{int(time.time())}"
-            }
-            return video_dict
-
-    @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env):
-        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
-        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, action="")
-        return len(repeat_video)
-
-    # Download / upload
-    @classmethod
-    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, title_score, env):
-        # Download the video
-        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video',
-                               title=video_dict['video_title'], url=video_dict['video_url'])
-        # Download the audio
-        Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio',
-                               title=video_dict['video_title'], url=video_dict['audio_url'])
-        # Mux audio and video together
-        Common.video_compose(log_type=log_type, crawler=crawler,
-                             video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
-        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        try:
-            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                # Remove the video folder
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Log.logging(log_type, crawler).info("Video size is 0; folder removed\n")
-                return
-        except FileNotFoundError:
-            # Remove the video folder
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Log.logging(log_type, crawler).info("Video file missing; folder removed\n")
-            return
-        # Download the cover image
-        Common.download_method(log_type=log_type, crawler=crawler, text='cover',
-                               title=video_dict['video_title'], url=video_dict['cover_url'])
-        # Save the video metadata to a txt file
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # Upload the video
-        Log.logging(log_type, crawler).info("Starting video upload...")
-        if env == "dev":
-            oss_endpoint = "out"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy="搜索抓取策略",
-                                                      our_uid=user_dict["uid"],
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            oss_endpoint = "inner"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy="搜索抓取策略",
-                                                      our_uid=user_dict["uid"],
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-
-        if our_video_id is None:
-            try:
-                # Remove the video folder
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                return
-            except FileNotFoundError:
-                return
-
-        # Save the video metadata to the database
-        insert_sql = f""" insert into crawler_video(video_id,
-                                user_id,
-                                out_user_id,
-                                platform,
-                                strategy,
-                                out_video_id,
-                                video_title,
-                                cover_url,
-                                video_url,
-                                duration,
-                                publish_time,
-                                play_cnt,
-                                crawler_rule,
-                                width,
-                                height)
-                                values({our_video_id},
-                                {user_dict["uid"]},
-                                "{video_dict['user_id']}",
-                                "{cls.platform}",
-                                "搜索爬虫策略",
-                                "{video_dict['video_id']}",
-                                "{video_dict['video_title']}",
-                                "{video_dict['cover_url']}",
-                                "{video_dict['video_url']}",
-                                {int(video_dict['duration'])},
-                                "{video_dict['publish_time_str']}",
-                                {int(video_dict['play_cnt'])},
-                                '{json.dumps(rule_dict)}',
-                                {int(video_dict['video_width'])},
-                                {int(video_dict['video_height'])}) """
-        Log.logging(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
-        cls.download_cnt += 1
-        Log.logging(log_type, crawler).info("视频信息写入数据库完成")
-
-        # Write the video metadata to Feishu
-        Feishu.insert_columns(log_type, crawler, "BUNvGC", "ROWS", 1, 2)
-        values = [[title_score,
-            user_dict["link"],
-            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-            "关键词搜索",
-            video_dict['video_title'],
-            str(video_dict['video_id']),
-            our_video_link,
-            video_dict['gid'],
-            video_dict['play_cnt'],
-            video_dict['comment_cnt'],
-            video_dict['like_cnt'],
-            video_dict['share_cnt'],
-            video_dict['duration'],
-            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
-            video_dict['publish_time_str'],
-            video_dict['user_name'],
-            video_dict['user_id'],
-            video_dict['avatar_url'],
-            video_dict['cover_url'],
-            video_dict['video_url'],
-            video_dict['audio_url']]]
-        time.sleep(0.5)
-        Feishu.update_values(log_type, crawler, "BUNvGC", "D2:Z2", values)
-        Log.logging(log_type, crawler).info('视频信息写入飞书完成\n')
-
-    @classmethod
-    def get_search_videos(cls, log_type, crawler, user_list, rule_dict, env):
-        Log.logging(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
-        for user_dict in user_list:
-            try:
-                cls.download_cnt = 0
-                Log.logging(log_type, crawler).info(f"开始抓取 {user_dict['link']} 视频")
-                cls.get_videoList(log_type=log_type,
-                                  crawler=crawler,
-                                  user_dict=user_dict,
-                                  rule_dict=rule_dict,
-                                  env=env)
-            except Exception as e:
-                Log.logging(log_type, crawler).error(f"抓取{user_dict['link']}视频时异常:{e}\n")
-
-    @classmethod
-    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
-        # mq = MQ(topic_name="topic_crawler_etl_" + env)
-        # 打印请求配置
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-        # # 不打开浏览器运行
-        chrome_options = webdriver.ChromeOptions()
-        chrome_options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-        chrome_options.add_argument("--headless")
-        chrome_options.add_argument("--window-size=1920,1080")
-        chrome_options.add_argument("--no-sandbox")
-        if env == "dev":
-            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
-        else:
-            chromedriver = "/usr/bin/chromedriver"
-        # driver初始化
-        driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(chromedriver))
-        driver.implicitly_wait(10)
-        Log.logging(log_type, crawler).info(f"打开搜索页:{user_dict['link']}")
-        driver.get(f"https://www.ixigua.com/search/{user_dict['link']}/")
-        time.sleep(2)
-        Log.logging(log_type, crawler).info("关闭登录弹框")
-        if driver.find_elements(By.XPATH, '//*[@class="xg-notification-close"]'):
-            driver.find_element(By.XPATH, '//*[@class="xg-notification-close"]').click()
-        # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-关闭登录弹框.png")
-        Log.logging(log_type, crawler).info("展开筛选按钮")
-        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
-        Log.logging(log_type, crawler).info("点击最新排序")
-        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-categories-wrapper"]/*[1]/*[2]/*[1]').click()
-        time.sleep(1)
-        # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-最新排序.png")
-        Log.logging(log_type, crawler).info("收起筛选按钮\n")
-        driver.find_element(By.XPATH, '//*[@class="searchPageV2__header-icons-categories"]').click()
-
-        index = 0
-        num = 0
-        while True:
-            # video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card single"]')
-            video_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
-            video_element_temp = video_elements[index:]
-            if len(video_element_temp) == 0:
-                Log.logging(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
-                driver.quit()
-                return
-            for i, video_element in enumerate(video_element_temp):
-                try:
-                    if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 30)):
-                        Log.logging(log_type, crawler).info(f"搜索词: {user_dict['link']},已下载视频数: {cls.download_cnt}\n")
-                        driver.quit()
-                        return
-                    if video_element is None:
-                        Log.logging(log_type, crawler).info('到底啦~\n')
-                        driver.quit()
-                        return
-                    driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard searchPageV2__card"]')
-                    num += 1
-                    Log.logging(log_type, crawler).info(f'拖动"视频"列表第{num}个至屏幕中间')
-                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
-                    time.sleep(1)
-                    # driver.get_screenshot_as_file(f"./{crawler}/photos/{user_dict['link']}-{num}.png")
-                    title = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i-1].get_attribute('title')
-                    publish_day = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard-accessories-bottomInfo__statistics"]')[index+i-1].text.split('· ')[-1]
-                    Log.logging(log_type, crawler).info(f"标题:{title}")
-                    Log.logging(log_type, crawler).info(f"发布时间:{publish_day}")
-                    if "年" in publish_day:
-                        Log.logging(log_type, crawler).info("发布时间超过 1 年\n")
-                        driver.quit()
-                        return
-
-                    item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[index+i-1].get_attribute('href')
-                    item_id = item_id.split("com/")[-1].split("?&")[0]
-                    video_dict = cls.get_video_info(log_type, crawler, item_id)
-                    if video_dict is None:
-                        Log.logging(log_type, crawler).info("无效视频\n")
-                        continue
-                    for k, v in video_dict.items():
-                        Log.logging(log_type, crawler).info(f"{k}:{v}")
-
-                    # if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
-                    #     Log.logging(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
-                    #     driver.quit()
-                    #     return
-
-                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
-                        Log.logging(log_type, crawler).info("不满足抓取规则\n")
-                    elif any(str(word) if str(word) in video_dict["video_title"] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Log.logging(log_type, crawler).info('已中过滤词\n')
-                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
-                        Log.logging(log_type, crawler).info('视频已下载\n')
-                    else:
-                        # title_score = get_title_score(log_type, "kuaishou", "16QspO", "0usaDk", video_dict["video_title"])
-                        # if title_score <= 0.3:
-                        #     Log.logging(log_type, crawler).info(f"权重分:{title_score}<=0.3\n")
-                        #     continue
-                        # Log.logging(log_type, crawler).info(f"权重分:{title_score}>0.3\n")
-                        # cls.download_publish(log_type=log_type,
-                        #                      crawler=crawler,
-                        #                      user_dict=user_dict,
-                        #                      video_dict=video_dict,
-                        #                      rule_dict=rule_dict,
-                        #                      title_score=title_score,
-                        #                      env=env)
-                        video_dict["out_user_id"] = video_dict["user_id"]
-                        video_dict["platform"] = crawler
-                        video_dict["strategy"] = log_type
-                        video_dict["out_video_id"] = video_dict["video_id"]
-                        video_dict["width"] = video_dict["video_width"]
-                        video_dict["height"] = video_dict["video_height"]
-                        video_dict["crawler_rule"] = json.dumps(rule_dict)
-                        video_dict["user_id"] = user_dict["uid"]
-                        video_dict["publish_time"] = video_dict["publish_time_str"]
-                        video_dict["strategy_type"] = log_type
-                        # mq.send_msg(video_dict)
-                        cls.download_cnt += 1
-                        Log.logging(log_type, crawler).info("已下载视频数+1\n")
-
-                except Exception as e:
-                    Log.logging(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
-
-            Log.logging(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
-            time.sleep(10)
-            index = index + len(video_element_temp)
-
-
-if __name__ == '__main__':
-
-    pass

+ 1 - 1
dev/dev_main/__init__.py → dev/mitm/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/7/4
+# @Time: 2023/9/20

+ 23 - 0
dev/mitm/start_main.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/9/21
+import subprocess
+import time
+
+
+def start_main():
+    # Launch the mitmproxy script added in this commit (start_mitm.py) as a subprocess
+    mitmproxy_process = subprocess.Popen(['python', 'start_mitm.py'])
+
+    # Give mitmproxy time to start and bind its port
+    time.sleep(2)
+
+    # Launch the start_selenium.py script as a subprocess
+    selenium_process = subprocess.Popen(['python', 'start_selenium.py'])
+
+    # Wait for both subprocesses to finish
+    mitmproxy_process.wait()
+    selenium_process.wait()
+
+
+start_main()
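
A note on the runner above: if start_selenium.py crashes, the bare wait() calls leave the proxy process running indefinitely. A minimal hardening sketch (my addition, not part of this commit) that tears the proxy down on the way out:

    import subprocess
    import time

    def start_main_safe():
        # Hypothetical variant: the proxy dies when the Selenium run ends
        mitm = subprocess.Popen(['python', 'start_mitm.py'])
        time.sleep(2)  # give the proxy time to bind port 8888
        try:
            subprocess.run(['python', 'start_selenium.py'], check=True)
        finally:
            mitm.terminate()  # stop the proxy even if Selenium failed
            mitm.wait()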

+ 38 - 0
dev/mitm/start_mitm.py

@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/9/20
+import asyncio
+
+import mitmproxy.http
+from mitmproxy import options
+from mitmproxy.tools.dump import DumpMaster
+from common.common import Common  # shared logging helper
+
+
+class MyAddon:
+    @staticmethod
+    def request(flow: mitmproxy.http.HTTPFlow):
+        # Log the request body
+        Common.logger('mitm', 'dev').info(flow.request.get_text())
+
+    @staticmethod
+    def response(flow: mitmproxy.http.HTTPFlow):
+        # Log the response body
+        Common.logger('mitm', 'dev').info(flow.response.get_text())
+
+
+async def start_mitmproxy():
+    # Proxy options: listen on all interfaces, port 8888
+    mitmproxy_options = options.Options(listen_host='0.0.0.0', listen_port=8888)
+
+    # DumpMaster drives the proxy; suppress its built-in terminal and
+    # flow output, then register MyAddon so our hooks see every flow
+    dump_master = DumpMaster(mitmproxy_options, with_termlog=False, with_dumper=False)
+    dump_master.addons.add(MyAddon())
+
+    # Run the proxy event loop until interrupted
+    await dump_master.run()
+
+
+# Master.run() is a coroutine in mitmproxy 9+, so drive it with asyncio
+asyncio.run(start_mitmproxy())
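
For reference, the same hooks can run without embedding a master at all: mitmdump loads any script that exposes an addons list. A sketch of that convention (the -q/-p/-s flags are standard mitmdump options; the standalone module is an assumption):

    # my_addon.py -- hypothetical standalone addon module
    import mitmproxy.http

    from common.common import Common

    class MyAddon:
        def request(self, flow: mitmproxy.http.HTTPFlow):
            Common.logger('mitm', 'dev').info(flow.request.get_text())

        def response(self, flow: mitmproxy.http.HTTPFlow):
            Common.logger('mitm', 'dev').info(flow.response.get_text())

    addons = [MyAddon()]

    # run with:  mitmdump -q -p 8888 -s my_addon.py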

+ 32 - 0
dev/mitm/start_selenium.py

@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/9/20
+from selenium import webdriver
+from common.common import Common  # shared logging helper
+
+
+def start_selenium():
+    # Route browser traffic through the mitmproxy instance started by
+    # start_mitm.py (127.0.0.1:8888), and skip certificate checks so
+    # HTTPS can be intercepted without installing the mitmproxy CA
+    chrome_options = webdriver.ChromeOptions()
+    chrome_options.add_argument('--proxy-server=http://127.0.0.1:8888')
+    chrome_options.add_argument('--ignore-certificate-errors')
+
+    # Start Selenium WebDriver with the options above
+    driver = webdriver.Chrome(options=chrome_options)
+
+    # Open the Douyin home page
+    driver.get('https://www.douyin.com/')
+
+    # Grab the rendered page source
+    page_content = driver.page_source
+
+    # Log the page content
+    Common.logger('mitm', 'dev').info(page_content)
+
+    # Shut down the WebDriver
+    driver.quit()
+
+
+start_selenium()
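
One caveat: douyin.com renders most of its content client-side, so reading page_source immediately after get() may capture a near-empty shell. A hedged addition using Selenium's standard wait helpers (the video tag as a readiness signal is an assumption):

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.ui import WebDriverWait

    # Block up to 10s until at least one <video> element is present
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, 'video')))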

+ 2 - 2
weixinzhishu/weixinzhishu_key/search_key_mac.py

@@ -221,12 +221,12 @@ class SearchKey:
     @classmethod
     def main(cls, log_type, crawler):
         while True:
-            if 11 <= datetime.now().hour <= 14:
+            if 11 <= datetime.now().hour < 14:
                 cls.write_wechat_key(log_type, crawler)
                 Common.logger(log_type, crawler).info('休眠10秒\n')
                 time.sleep(10)
             else:
-                Common.logger(log_type, crawler).info("休眠中,获取 search_key 的时间段为: 11:00:00 - 14:59:59")
+                Common.logger(log_type, crawler).info("休眠中,获取 search_key 的时间段为: 11:00:00 - 13:59:59")
                 time.sleep(60)
 
 
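The boundary fix above is subtle enough to spell out: 11 <= hour <= 14 kept the key-fetch window open through 14:59:59, while 11 <= hour < 14 closes it at 13:59:59, matching the corrected log message. A quick illustrative check of the predicate:

    # Hour 14 is now excluded from the fetch window
    for h in (10, 11, 13, 14):
        print(h, 11 <= h < 14)  # True only for 11-13, i.e. 11:00:00-13:59:59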

+ 17 - 16
zhufuquanzi/zhufuquanzi_main/run_zfqz_dev.py

@@ -12,22 +12,23 @@ from zhufuquanzi.zhufuquanzi_recommend.zhufuquanzi_recommend2 import ZFQZRecomme
 class ZFQZRecommendMain:
     @classmethod
     def zhufuquanzi_recommend_main(cls, log_type, crawler, env):
-        Common.logger(log_type, crawler).info('开始抓取"祝福圈子"推荐')
-        Common.logging(log_type, crawler, env, '开始抓取"祝福圈子"推荐')
-        rule_dict = {"period": {"min": 365, "max": 365},
-                     "duration": {"min": 40, "max": 2400},
-                     # "play_cnt": {"min": 100000, "max": 0},
-                     "play_cnt": {"min": 1, "max": 0},
-                     "videos_cnt": {"min": 10, "max": 0},
-                     # "like_cnt": {"min": 1000, "max": 0}}
-                     "like_cnt": {"min": 0, "max": 0}}
-        ZFQZRecommend.start_wechat(log_type=log_type,
-                                   crawler=crawler,
-                                   rule_dict=rule_dict,
-                                   our_uid=6267141,
-                                   env=env)
-        Common.logger(log_type, crawler).info("抓取一轮结束\n")
-        Common.logging(log_type, crawler, env, "抓取一轮结束\n")
+        for i in range(1):
+            Common.logger(log_type, crawler).info('开始抓取"祝福圈子"推荐')
+            Common.logging(log_type, crawler, env, '开始抓取"祝福圈子"推荐')
+            rule_dict = {"period": {"min": 365, "max": 365},
+                         "duration": {"min": 40, "max": 2400},
+                         "play_cnt": {"min": 100000, "max": 0},
+                         "like_cnt": {"min": 1000, "max": 0},
+                         # "play_cnt": {"min": 1, "max": 0},
+                         # "like_cnt": {"min": 0, "max": 0},
+                         "videos_cnt": {"min": 10, "max": 0}}
+            ZFQZRecommend.start_wechat(log_type=log_type,
+                                       crawler=crawler,
+                                       rule_dict=rule_dict,
+                                       our_uid=6267141,
+                                       env=env)
+            Common.logger(log_type, crawler).info("抓取一轮结束\n")
+            Common.logging(log_type, crawler, env, "抓取一轮结束\n")
 
 
 if __name__ == "__main__":
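
A hedged reading of the rule_dict convention these runners share, inferred from values like {"min": 1, "max": 0} rather than from any documentation: each metric carries a min and a max, with max == 0 treated as "no upper bound". A minimal predicate under that assumption:

    def passes(value: int, rule: dict) -> bool:
        # max == 0 is read as "unbounded above" (assumption)
        lo, hi = rule.get("min", 0), rule.get("max", 0)
        return value >= lo and (hi == 0 or value <= hi)

    assert passes(150000, {"min": 100000, "max": 0})  # play_cnt rule above
    assert not passes(30, {"min": 40, "max": 2400})   # duration below min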

+ 46 - 19
zhufuquanzi/zhufuquanzi_recommend/zhufuquanzi_recommend2.py

@@ -24,6 +24,8 @@ from common.scheduling_db import MysqlHelper
 class ZFQZRecommend:
     platform = "祝福圈子"
     download_cnt = 0
+    element_list = []
+    i = 0
 
     @classmethod
     def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
@@ -37,7 +39,7 @@ class ZFQZRecommend:
         caps = {
             "platformName": "Android",
             "devicesName": "Android",
-            "platformVersion": "7",
+            "platformVersion": "11",
             # "udid": "emulator-5554",
             "appPackage": "com.tencent.mm",
             "appActivity": ".ui.LauncherUI",
@@ -78,15 +80,15 @@ class ZFQZRecommend:
         size = driver.get_window_size()
         driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
                      int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
-        time.sleep(5)
+        time.sleep(1)
         Common.logger(log_type, crawler).info('打开小程序"祝福圈子"')
         Common.logging(log_type, crawler, env, '打开小程序"祝福圈子"')
         driver.find_elements(By.XPATH, '//*[@text="祝福圈子"]')[-1].click()
-        time.sleep(10)
+        time.sleep(5)
 
         cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
 
-        time.sleep(3)
+        time.sleep(1)
         driver.quit()
 
     @classmethod
@@ -140,11 +142,15 @@ class ZFQZRecommend:
         for i in range(3):
             cls.search_elements(driver, '//*[@class="bless--list"]')
             Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
+            time.sleep(1)
+            Common.logger(log_type, crawler).info("滑动标题至可见状态")
+            driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});", video_title_element[0])
+            time.sleep(3)
             Common.logger(log_type, crawler).info("点击标题")
             video_title_element[0].click()
-            # driver.execute_script("arguments[0].click()", video_title_element[0])
+            # driver.execute_script("arguments[0].click();", video_title_element[0])
             Common.logger(log_type, crawler).info("点击标题完成")
-            time.sleep(5)
+            time.sleep(1)
             video_url_elements = cls.search_elements(driver, '//*[@class="index--video-item index--video"]')
             if video_url_elements:
                 return video_url_elements[0].get_attribute("src")
@@ -155,15 +161,16 @@ class ZFQZRecommend:
         driver.implicitly_wait(20)
         cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver,
                             xpath='//*[@class="tags--tag tags--tag-0 tags--checked"]')
-        time.sleep(3)
+        time.sleep(1)
 
         page = 0
         while True:
-            Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页")
-            Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页")
             if cls.search_elements(driver, '//*[@class="bless--list"]') is None:
                 Common.logger(log_type, crawler).info("窗口已销毁\n")
                 Common.logging(log_type, crawler, env, '窗口已销毁\n')
+                cls.i = 0
+                cls.download_cnt = 0
+                cls.element_list = []
                 return
 
             cls.swipe_up(driver)
@@ -173,19 +180,32 @@ class ZFQZRecommend:
             soup.prettify()
 
             video_list_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
-            Common.logger(log_type, crawler).info(f"第{page + 1}页共:{len(video_list_elements)}条视频\n")
-            Common.logging(log_type, crawler, env, f"第{page + 1}页共:{len(video_list_elements)}条视频\n")
+            # Elements present in video_list_elements but not yet in cls.element_list
+            video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
+            # Union of video_list_elements and cls.element_list
+            cls.element_list = list(set(video_list_elements) | set(cls.element_list))
+            Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
+            Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
+
+            if len(video_list_elements) == 0:
+                for i in range(10):
+                    Common.logger(log_type, crawler).info(f"向上滑动第{i+1}次")
+                    cls.swipe_up(driver)
+                    time.sleep(0.5)
+                continue
 
             for i, video_element in enumerate(video_list_elements):
                 try:
+                    Common.logger(log_type, crawler).info(f"本轮已抓取{cls.download_cnt}条视频\n")
+                    Common.logging(log_type, crawler, env, f"本轮已抓取{cls.download_cnt}条视频\n")
                     if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
-                        Common.logger(log_type, crawler).info(f"本轮已抓取视频数:{cls.download_cnt}")
-                        Common.logging(log_type, crawler, env, f"本轮已抓取视频数:{cls.download_cnt}")
+                        cls.i = 0
                         cls.download_cnt = 0
+                        cls.element_list = []
                         return
-
-                    Common.logger(log_type, crawler).info(f"第{i + 1}条视频")
-                    Common.logging(log_type, crawler, env, f"第{i + 1}条视频")
+                    cls.i += 1
+                    Common.logger(log_type, crawler).info(f"第{cls.i}条视频")
+                    Common.logging(log_type, crawler, env, f"第{cls.i}条视频")
 
                     video_title = video_element.find("wx-view", class_="dynamic--title").text
                     play_str = video_element.find("wx-view", class_="dynamic--views").text
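
A note on the de-duplication introduced in this hunk: it takes set differences over BeautifulSoup tags parsed fresh from page_source on every pass, so whether a previously seen card is filtered depends on Tag equality/hash semantics rather than on any stable identifier. A sketch of a key-based alternative (using the dynamic--title text as the key is an assumption):

    seen_titles = set()

    def dedupe(cards):
        # Keep only cards whose title text has not been seen this round
        fresh = []
        for card in cards:
            title_el = card.find("wx-view", class_="dynamic--title")
            key = title_el.text.strip() if title_el else repr(card)
            if key not in seen_titles:
                seen_titles.add(key)
                fresh.append(card)
        return fresh
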
@@ -240,10 +260,14 @@ class ZFQZRecommend:
                     if video_title is None or cover_url is None:
                         Common.logger(log_type, crawler).info("无效视频\n")
                         Common.logging(log_type, crawler, env, '无效视频\n')
+                        cls.swipe_up(driver)
+                        time.sleep(0.5)
                     elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                        rule_dict=rule_dict) is False:
                         Common.logger(log_type, crawler).info("不满足抓取规则\n")
                         Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                        cls.swipe_up(driver)
+                        time.sleep(0.5)
                     elif any(str(word) if str(word) in video_dict["video_title"] else False
                              for word in get_config_from_mysql(log_type=log_type,
                                                                source=crawler,
@@ -252,9 +276,13 @@ class ZFQZRecommend:
                                                                action="")) is True:
                         Common.logger(log_type, crawler).info('已中过滤词\n')
                         Common.logging(log_type, crawler, env, '已中过滤词\n')
+                        cls.swipe_up(driver)
+                        time.sleep(0.5)
                     elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
                         Common.logging(log_type, crawler, env, '视频已下载\n')
+                        cls.swipe_up(driver)
+                        time.sleep(5)
                     else:
                         video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
                         if video_title_element is None:
@@ -270,7 +298,7 @@ class ZFQZRecommend:
                             time.sleep(5)
                             continue
                         video_dict['video_url'] = video_url
-                        Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
+                        Common.logger(log_type, crawler).info(f"video_url:{video_url}")
 
                         video_dict["platform"] = crawler
                         video_dict["strategy"] = log_type
@@ -281,9 +309,8 @@ class ZFQZRecommend:
                         mq.send_msg(video_dict)
                         cls.download_cnt += 1
                         driver.press_keycode(AndroidKey.BACK)
-                        Common.logger(log_type, crawler).info("符合抓取条件,mq send msg 成功\n")
-                        Common.logging(log_type, crawler, env, "符合抓取条件,ACK MQ 成功\n")
                         time.sleep(5)
+                        cls.swipe_up(driver)
                 except Exception as e:
                     Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                     Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")