wangkun 1 tahun lalu
induk
melakukan
85cad3c60c

+ 1 - 1
README.MD

@@ -115,7 +115,7 @@ ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 #### CPU/MEMORY 监控
 ```commandline
 正式环境
-* * * * * /bin/sh /root/piaoquan_crawler/monitor/monitor_main/run_monitor.sh monitor/monitor_main/run_cpu_memory.py "cpumemory" "monitor" "prod"
+* * * * * /usr/bin/sh /root/piaoquan_crawler/monitor/monitor_main/run_monitor.sh monitor/monitor_main/run_cpu_memory.py "cpumemory" "monitor" "prod"
 线下调试
 sh monitor/monitor_main/run_monitor.sh monitor/monitor_main/run_cpu_memory.py "cpumemory" "monitor" "dev"
 检测进程

+ 14 - 0
common/public.py

@@ -4,6 +4,8 @@
 import requests
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_exception import MQExceptionBase
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
 import os, sys, jieba
 import time
 import random
@@ -26,6 +28,18 @@ def get_user_from_mysql(log_type, crawler, source, env, action=''):
         Common.logger(log_type, crawler).warning(f"爬虫:{crawler},没有查到抓取名单")
         return []
 
def similarity(title1, title2):
    """Return the TF-IDF cosine similarity of two titles, in [0.0, 1.0].

    Titles are word-segmented with jieba; 1.0 means identical word content,
    0.0 means no shared words (or an empty vocabulary).
    """
    # Word-segment both titles.
    seg1 = jieba.lcut(title1)
    seg2 = jieba.lcut(title2)

    # Join with spaces and keep single-character words: a ""-join glues the
    # segments back into the original string, so the vectorizer's tokenizer
    # never sees the jieba segmentation at all; the default token_pattern
    # would additionally drop one-character Chinese words.
    tfidf_vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
    try:
        tfidf_matrix = tfidf_vectorizer.fit_transform([" ".join(seg1), " ".join(seg2)])
    except ValueError:
        # Empty vocabulary (e.g. both titles empty or punctuation-only).
        return 0.0

    # Cosine similarity of the two document vectors.
    similar = cosine_similarity(tfidf_matrix[0], tfidf_matrix[1])[0][0]
    return similar
 
 def title_like(log_type, crawler, platform, title, env):
     """

+ 89 - 0
dev/dev_script/mitmproxy_test.py

@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/24
+import os
+import time
+
+from mitmproxy import http, proxy, options, proxyconfig
+from mitmproxy.proxy.config import ProxyConfig
+from mitmproxy.tools.dump import DumpMaster
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+
+
class ProxyData:
    """Dev experiment: capture Douyin web traffic by running a mitmproxy
    instance while Chrome (attached via Selenium remote debugging) opens the
    recommend feed.

    NOTE(review): requirements.txt pins mitmproxy~=9.0.1, where
    `mitmproxy.proxy.config.ProxyConfig` (and the `proxy` / `proxyconfig`
    modules imported at the top of this file) no longer exist — confirm this
    module can still be imported at all.
    """

    # Captured request/response dicts (class-level, shared across calls).
    requests_data = []
    response_data = []

    @classmethod
    def start_proxy(cls):
        # Proxy listen options
        opts = options.Options(listen_host='0.0.0.0', listen_port=8888)

        # Build the proxy configuration
        config = ProxyConfig(opts)

        # Create the DumpMaster that drives the proxy event loop
        master = DumpMaster(opts)
        master.server = config

        # Start the proxy; run() blocks until the proxy is shut down
        print("Proxy started")
        master.run()

    # NOTE(review): the two methods below follow mitmproxy's addon-hook shape
    # but are never registered on the master, so they never fire — confirm.
    @classmethod
    def intercept_request(cls, flow: http.HTTPFlow):
        # Record the intercepted request
        request_data = {
            'url': flow.request.url,
            'method': flow.request.method,
            'headers': dict(flow.request.headers),
            'content': flow.request.content.decode('utf-8')
        }
        cls.requests_data.append(request_data)

    @classmethod
    def intercept_response(cls, flow: http.HTTPFlow):
        # Record the intercepted response
        response_data = {
            'url': flow.request.url,
            'status_code': flow.response.status_code,
            'headers': dict(flow.response.headers),
            'content': flow.response.content.decode('utf-8')
        }
        cls.response_data.append(response_data)

    @classmethod
    def start_selenium(cls):
        # Kill any running Chrome first (macOS-only shell pipeline)
        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
        os.system(quit_cmd)
        time.sleep(1)
        # Relaunch Chrome with remote debugging on port 8888
        # NOTE(review): 8888 is also the proxy's listen port in start_proxy —
        # confirm the two were meant to share a number.
        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=8888'
        os.system(cmd)
        # Request Chrome performance logging
        ca = DesiredCapabilities.CHROME
        ca["goog:loggingPrefs"] = {"performance": "ALL"}
        # Path to the matching chromedriver build
        chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
        # Attach to the already-running Chrome instead of spawning a new one
        browser = webdriver.ChromeOptions()
        browser.add_experimental_option("debuggerAddress", "127.0.0.1:8888")
        # Build the driver
        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
        driver.implicitly_wait(10)
        print("打开抖音推荐页")
        driver.get(f"https://www.douyin.com/")
+
+
if __name__ == "__main__":
    # NOTE(review): start_proxy() blocks inside master.run(), so everything
    # below only executes after the proxy shuts down — confirm whether the
    # proxy was meant to run on a background thread instead.
    ProxyData.start_proxy()
    ProxyData.start_selenium()
    print("requests_data:", ProxyData.requests_data)
    print("response_data:", ProxyData.response_data)

    # Dump responses whose URL contains www.douyin.com
    for response in ProxyData.response_data:
        if "www.douyin.com" in response['url']:
            print("Douyin response:", response)

+ 33 - 0
dev/dev_script/shipinhao.py

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/21
+import requests
+from bs4 import BeautifulSoup
+
class Shipinhao:
    """Dev probe: fetch the WeChat Channels recommend feed once over HTTP and
    print each video's title and URL."""

    @classmethod
    def get_shipinhao(cls):
        """Fetch the recommend-video list and print title/URL for each entry.

        Raises requests exceptions on network errors or non-2xx responses;
        a payload without the expected key simply prints nothing.
        """
        # WeChat Channels recommend endpoint (the embedded tokens are
        # session-bound and expire; this is a throwaway dev probe).
        url = 'https://mp.weixin.qq.com/mp/videoplayer?action=get_recommend_video_list&__biz=MzI1OTQxMjE0Nw==&uin=&key=&pass_ticket=&wxtoken=777&devicetype=Windows+10&clientversion=1000&appmsg_token=cc11373ab7db78508003b6d2f46bab1a779666d3&f=json'

        # Bounded timeout so a dead endpoint cannot hang the script, and an
        # explicit status check so an error page is not parsed as JSON.
        response = requests.get(url, timeout=10)
        response.raise_for_status()

        # Parse the JSON body
        data = response.json()
        print(f'data: {data}')
        # Expired tokens return an error payload without this key — treat
        # that as an empty list instead of raising KeyError.
        video_list = data.get('recommend_video_list', [])
        for video in video_list:
            # Title and playable URL per recommended video
            title = video.get('title')
            video_url = video.get('video_url')
            print(f'Title: {title}')
            print(f'Video URL: {video_url}')
            print('---')
+
+
if __name__ == "__main__":
    # One-off manual run of the probe.
    Shipinhao.get_shipinhao()

+ 30 - 0
dev/dev_script/title_like.py

@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import jieba
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
+
+
class TitleLike:
    """Dev sandbox for the TF-IDF title-similarity check used by the crawler."""

    @classmethod
    def similarity(cls, title1, title2):
        """Return the TF-IDF cosine similarity of two titles, in [0.0, 1.0]."""
        # Word-segment both titles with jieba.
        seg1 = jieba.lcut(title1)
        seg2 = jieba.lcut(title2)

        # Join with spaces and keep single-character words: a ""-join glues
        # the segments back into the original string, so the vectorizer never
        # sees the jieba segmentation; the default token_pattern would also
        # drop one-character Chinese words.
        tfidf_vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
        tfidf_matrix = tfidf_vectorizer.fit_transform([" ".join(seg1), " ".join(seg2)])

        # Cosine similarity of the two document vectors (renamed so the local
        # no longer shadows the method name).
        score = cosine_similarity(tfidf_matrix[0], tfidf_matrix[1])[0][0]
        return score
+
if __name__ == "__main__":
    # Ad-hoc check with two near-duplicate titles; expected to score high.
    t1 = """#发现未来 7月18日(发布)广东(发布)男生满心欢喜准备迎接喜欢的女孩 下一秒"""
    t2 = "...7月18日(发布)广东(发布)男生满心欢喜准备迎接喜欢的女孩 下一秒其他出"
    # t2 = "2月23日,广东。男子地铁口挥拳重击抱娃女子。网友:对于家暴零容忍"
    print(TitleLike.similarity(t1, t2))

+ 5 - 1
requirements.txt

@@ -12,4 +12,8 @@ selenium~=4.2.0
 urllib3==1.26.9
 workalendar==17.0.0
 opencv-python~=4.8.0.74
-Appium-Python-Client~=2.8.1
+Appium-Python-Client~=2.8.1
+mitmproxy~=9.0.1
+bs4~=0.0.1
+beautifulsoup4~=4.11.1
+scikit-learn~=1.3.0

+ 33 - 0
shipinhao/shipinhao_main/run_sph_recommend_dev.py

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from shipinhao.shipinhao_recommend.recommend_h5 import RecommendH5
+from shipinhao.shipinhao_recommend.shipinhao_recommend import ShipinhaoRecommend
+
+
class ShipinhaoRecommendMain:
    """Entry point wiring for the Channels recommend crawl: scan the feed for
    candidates, then search/download the ones that matched."""

    @classmethod
    def shipinhao_recommend_main(cls, log_type, crawler, env):
        """Run one full scan-and-download round with a fixed rule set."""
        Common.logger(log_type, crawler).info("开始抓取视频号推荐\n")
        # Fixed crawl rules: up-to-a-year-old, 10s-30min clips with minimum
        # favorite/share counts (max=0 means unbounded above).
        recommend_rules = {
            "period": {"min": 365, "max": 365},
            "duration": {"min": 10, "max": 1800},
            "favorite_cnt": {"min": 5000, "max": 0},
            "share_cnt": {"min": 1000, "max": 0},
        }
        # Stage 1: collect rule-passing candidates from the recommend feed.
        ShipinhaoRecommend.get_recommend_list(
            log_type=log_type, crawler=crawler, rule_dict=recommend_rules, env=env)
        # Stage 2: re-find each candidate via search and push it downstream.
        RecommendH5.download_videos(
            log_type=log_type, crawler=crawler, env=env,
            rule_dict=recommend_rules, our_uid="6267140")
        Common.logger(log_type, crawler).info("抓取一轮结束\n")
+
+
if __name__ == "__main__":
    # Manual dev run: one full scan-and-download round against "dev".
    ShipinhaoRecommendMain.shipinhao_recommend_main("recommend", "shipinhao", "dev")

+ 3 - 0
shipinhao/shipinhao_recommend/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/25

+ 238 - 0
shipinhao/shipinhao_recommend/recommend_h5.py

@@ -0,0 +1,238 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import json
+import os
+import sys
+import time
+from appium import webdriver
+from selenium.common import NoSuchElementException
+from appium.webdriver.webdriver import WebDriver
+from hashlib import md5
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.public import similarity
+from common.common import Common
+from shipinhao.shipinhao_recommend.shipinhao_recommend import ShipinhaoRecommend
+
+
class RecommendH5:
    """Stage 2 of the Channels recommend crawl: for every candidate collected
    by ShipinhaoRecommend, search WeChat's in-app H5 search page for the same
    title, scrape the playable video/cover/avatar URLs, and publish the
    enriched record to the ETL message queue.
    """

    # Platform label ("视频号" = WeChat Channels); used by downstream consumers.
    platform = "视频号"

    @classmethod
    def start_wechat(cls, log_type, crawler, env):
        """Launch WeChat on the attached Android device via Appium and return
        the remote WebDriver (expects an Appium server on localhost:4723)."""
        Common.logger(log_type, crawler).info('启动微信')
        if env == "dev":
            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
        else:
            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
        caps = {
            "platformName": "Android",  # mobile OS: Android / iOS
            "deviceName": "Android",  # device name; any value works for Android
            # NOTE(review): key is misspelled — Appium expects "platformVersion";
            # confirm whether this capability is ever applied.
            "platforVersion": "13",  # device OS version (Android 13)
            "appPackage": "com.tencent.mm",  # package of the app under test (WeChat)
            "appActivity": ".ui.LauncherUI",  # Activity launched on start
            "autoGrantPermissions": True,  # let Appium auto-grant base permissions,
            # has no effect when noReset is True (Android-only); value is True or False
            "unicodekeyboard": True,  # use Appium's IME so Chinese text can be typed
            "resetkeyboard": True,  # restore the original IME when done
            "noReset": True,  # keep the app state (do not reset WeChat)
            "recreateChromeDriverSessions": True,  # leaving a chromedriver context kills the session, so no manual kill is needed
            "printPageSourceOnFailure": True,  # appium log records the full page source when an element is not found
            "newCommandTimeout": 6000,  # idle-command timeout
            "automationName": "UiAutomator2",  # automation engine; default is Appium,
            # Appium / UiAutomator2 / Selendroid / Espresso are for Android, XCUITest for iOS
            "showChromedriverLog": True,
            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},  # webview process chromedriver attaches to
            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
            'enableWebviewDetailsCollection': True,
            'setWebContentsDebuggingEnabled': True,
            'chromedriverExecutable': chromedriverExecutable,
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(10)
        time.sleep(5)  # give WeChat time to finish launching
        return driver

    # Search every window handle for xpath; return the first non-empty match
    # list, or None (implicitly) when no handle contains it.
    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass

    # noinspection PyBroadException
    @classmethod
    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
        """Switch into the WeChat webview context and onto the window holding
        the search-result page; return "成功" on success, None otherwise."""
        webviews = driver.contexts
        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
        # Assumes webviews[1] is the WeChat search webview — TODO confirm.
        driver.switch_to.context(webviews[1])
        Common.logger(log_type, crawler).info(driver.current_context)
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            try:
                driver.switch_to.window(handle)
                time.sleep(1)
                # The result page is identified by its category bar element.
                driver.find_element(By.XPATH, '//div[@class="unit"]')
                Common.logger(log_type, crawler).info('切换 webview 成功')
                return "成功"
            except Exception:
                Common.logger(log_type, crawler).info("切换 webview 失败")

    @classmethod
    def search_video(cls, log_type, crawler, env, video_dict, rule_dict, our_uid):
        """Search WeChat for video_dict['video_title']; when a result's title
        and author are similar enough to the candidate, enrich video_dict with
        the scraped URLs, publish it to the ETL queue, and quit the driver."""
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        driver = cls.start_wechat(log_type, crawler, env)
        # Tap WeChat's search box and type the (quote/hash-stripped) title
        driver.implicitly_wait(10)
        Common.logger(log_type, crawler).info("点击搜索框")
        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # WeChat 8.0.30 resource id
        time.sleep(0.5)
        # NOTE(review): WebElement.clear() returns None in Selenium, so
        # chaining .send_keys(...) onto it should raise AttributeError — confirm.
        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(
            video_dict['video_title'].replace('"', "").replace('“', "").replace('”', "").replace('#', ""))  # WeChat 8.0.30
        # driver.press_keycode(AndroidKey.ENTER)
        Common.logger(log_type, crawler).info("进入搜索词页面")
        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()  # WeChat 8.0.30
        time.sleep(5)

        # Switch into the webview that renders the search results
        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
        if check_to_webview is None:
            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
            return
        time.sleep(1)

        # Switch to the "Channels" category tab of the results
        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
        Common.logger(log_type, crawler).info('点击"视频号"分类')
        Common.logging(log_type, crawler, env, '点击"视频号"分类')
        shipinhao_tags[0].click()
        time.sleep(5)

        # Retry up to 3 times for the H5 result container, refreshing between
        # attempts. NOTE(review): the `global` looks unnecessary — the loop
        # body always assigns h5_page at least once; confirm.
        global h5_page
        for i in range(3):
            h5_page = cls.search_elements(driver, '//*[@class="mixed-box__bd"]')
            if h5_page is None:
                Common.logger(log_type, crawler).info('未发现H5页面')
                driver.refresh()
            else:
                break

        if h5_page is None:
            driver.quit()
            return

        Common.logger(log_type, crawler).info('获取视频列表\n')
        video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
        if video_elements is None:
            Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
            return

        for i, video_element in enumerate(video_elements):
            try:
                if video_element is None:
                    Common.logger(log_type, crawler).info('到底啦~\n')
                    return

                # Scroll the i-th result to the screen center so its nodes render
                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{i + 1}条至屏幕中间')
                time.sleep(3)
                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
                                      video_element)
                if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                    Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
                    return
                # Scrape title/author/URL; cover and avatar live inside the
                # inline style attribute as url("...") fragments.
                h5_video_title = \
                video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[i].text[:40]
                h5_user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
                    i].text
                h5_video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[i].get_attribute(
                    'src')
                cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[i].get_attribute(
                    'style')
                h5_cover_url = cover_url.split('url("')[-1].split('")')[0]
                avatar_url = video_element.find_elements(By.XPATH,
                                                         '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
                    i].get_attribute('style')
                h5_avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                # Stable ids derived from the scraped title / author name
                h5_out_video_id = md5(h5_video_title.encode('utf8')).hexdigest()
                h5_out_user_id = md5(h5_user_name.encode('utf8')).hexdigest()

                title_similarity = similarity(video_dict['video_title'], h5_video_title)
                user_name_similarity = similarity(video_dict['user_name'], h5_user_name)

                # Accept only an exact-similarity author match plus >=0.5 title match
                if title_similarity >= 0.5 and user_name_similarity >= 1.0:
                    video_dict['cover_url'] = h5_cover_url
                    video_dict['avatar_url'] = h5_avatar_url
                    video_dict['out_video_id'] = h5_out_video_id
                    video_dict['video_url'] = h5_video_url

                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")

                    # Fields the ETL consumer expects on the queue message
                    video_dict["out_user_id"] = h5_out_user_id
                    video_dict["platform"] = crawler
                    video_dict["strategy"] = log_type
                    video_dict["out_video_id"] = h5_out_video_id
                    video_dict["width"] = 0
                    video_dict["height"] = 0
                    video_dict["crawler_rule"] = json.dumps(rule_dict)
                    video_dict["user_id"] = our_uid
                    video_dict["publish_time"] = video_dict["publish_time_str"]
                    mq.send_msg(video_dict)
                    Common.logger(log_type, crawler).info("已抓取到目标视频\n")
                    driver.quit()
                    return
                else:
                    # Not the same video — log the mismatch details and continue
                    Common.logger(log_type, crawler).info(f"video_dict['video_title']:{video_dict['video_title']}")
                    Common.logger(log_type, crawler).info(f"h5_video_title:{h5_video_title}")
                    Common.logger(log_type, crawler).info(f"title_similarity:{title_similarity}")
                    Common.logger(log_type, crawler).info(f"video_dict['user_name']:{video_dict['user_name']}")
                    Common.logger(log_type, crawler).info(f"h5_user_name:{h5_user_name}")
                    Common.logger(log_type, crawler).info(f"user_name_similarity:{user_name_similarity}\n")
            except Exception as e:
                Common.logger(log_type, crawler).info(f"抓取单条H5视频时异常:{e}\n")

    @classmethod
    def download_videos(cls, log_type, crawler, env, rule_dict, our_uid):
        """Try to locate and publish every candidate in
        ShipinhaoRecommend.download_video_list; per-video errors are logged
        and swallowed so one failure does not stop the batch."""
        Common.logger(log_type, crawler).info(f'共{len(ShipinhaoRecommend.download_video_list)}条视频待抓取')
        Common.logger(log_type, crawler).info(f'download_video_list:{ShipinhaoRecommend.download_video_list}\n')
        if len(ShipinhaoRecommend.download_video_list) == 0:
            Common.logger(log_type, crawler).info("没有待下载的视频\n")
            return
        for video_dict in ShipinhaoRecommend.download_video_list:
            try:
                cls.search_video(log_type, crawler, env, video_dict, rule_dict, our_uid)
            except Exception as e:
                Common.logger(log_type, crawler).info(f"抓取视频异常:{e}\n")
+
+
if __name__ == "__main__":
    # Manual dev run against a fixed candidate list (normally produced by
    # ShipinhaoRecommend.get_recommend_list).
    ShipinhaoRecommend.download_video_list = [
        {'video_title': '网友:不知道此时此刻黑车司机在想什么', 'video_id': '96bfb8b86965df7365f02373ce37fe87', 'duration': 21, 'user_name': '沂蒙晚报', 'like_cnt': 9575, 'share_cnt': 11000, 'favorite_cnt': 25000, 'comment_cnt': 5026, 'publish_time_str': '2023-07-25', 'publish_time_stamp': 1690214400, 'publish_time': 1690214400000, 'period': 1},
        {'video_title': '女朋友这不就来了么', 'video_id': 'b1892886dca8c38dd6d72848ae4fd565', 'duration': 10, 'user_name': '向往的火焰蓝', 'like_cnt': 11000, 'share_cnt': 3701, 'favorite_cnt': 26000, 'comment_cnt': 1426, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0},
        {'video_title': '近日,在韩国举办的2023世界跆拳道大赛上,中国选手出“奇招”,引网友点赞。关注', 'video_id': 'ebe8637a152c58bac2f1d875b257f9b5', 'duration': 10, 'user_name': '搜狐新闻', 'like_cnt': 9475, 'share_cnt': 9134, 'favorite_cnt': 18000, 'comment_cnt': 1770, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0},
        {'video_title': '与愚者争论,自己就是愚者 #动画小故事  #哲理故事', 'video_id': '629abeb79f0de7a4dc45fadffc8ebc2b', 'duration': 32, 'user_name': '陈搞搞', 'like_cnt': 23000, 'share_cnt': 49000, 'favorite_cnt': 67000, 'comment_cnt': 1336, 'publish_time_str': '2023-07-24', 'publish_time_stamp': 1690128000, 'publish_time': 1690128000000, 'period': 2},
        {'video_title': '我看不懂这种行为的意义在哪里,所以我决定坚持反复观看试图参悟其中的深意,', 'video_id': 'd7e6e1eeb519183d5e8665c92a101378', 'duration': 15, 'user_name': '蜡笔小星丶', 'like_cnt': 20000, 'share_cnt': 100000, 'favorite_cnt': 51000, 'comment_cnt': 9836, 'publish_time_str': '2023-07-25', 'publish_time_stamp': 1690214400, 'publish_time': 1690214400000, 'period': 1},
        {'video_title': '女子一回家就开始脱衣服,不料老公的弟弟还在家里,女子下一秒的反应亮了!', 'video_id': 'c75472e887f2641acd34138b705cf8b9', 'duration': 11, 'user_name': '西米七七', 'like_cnt': 4335, 'share_cnt': 1107, 'favorite_cnt': 13000, 'comment_cnt': 1068, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0}]
    RecommendH5.download_videos(log_type="recommend",
                                crawler="shipinhao",
                                env="dev",
                                rule_dict={"period": {"min": 365, "max": 365},
                                           "duration": {"min": 10, "max": 1800},
                                           "favorite_cnt": {"min": 50000, "max": 0},
                                           "share_cnt": {"min": 10000, "max": 0}},
                                our_uid=6267140
                                )

+ 260 - 0
shipinhao/shipinhao_recommend/shipinhao_recommend.py

@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/25
+import datetime
+import os
+import sys
+import time
+from datetime import date, timedelta
+from hashlib import md5
+from appium import webdriver
+from appium.webdriver.webdriver import WebDriver
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import download_rule
+from common.scheduling_db import MysqlHelper
+
+
class ShipinhaoRecommend:
    """Stage 1 of the Channels recommend crawl: drive the WeChat app through
    the Channels recommend feed via Appium, read each video's on-screen stats,
    and collect rule-passing, not-yet-downloaded candidates into
    download_video_list for the H5 stage to process.
    """

    # Platform label ("视频号" = WeChat Channels) used in the dedup SQL.
    platform = "视频号"
    # Candidates that passed the download rule and the dedup check.
    download_video_list = []
    # Number of feed items to scan per round.
    scan_count = 20

    @classmethod
    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
        """Return how many crawler_video rows already exist for out_video_id
        (0 means the video has not been downloaded before)."""
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def start_wechat(cls, log_type, crawler, env):
        """Launch WeChat on the attached Android device via Appium and return
        the remote WebDriver (expects an Appium server on localhost:4723)."""
        Common.logger(log_type, crawler).info('启动微信')
        if env == "dev":
            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
        else:
            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
        caps = {
            "platformName": "Android",  # mobile OS: Android / iOS
            "deviceName": "Android",  # device name; any value works for Android
            # NOTE(review): key is misspelled — Appium expects "platformVersion";
            # confirm whether this capability is ever applied.
            "platforVersion": "13",  # device OS version (Android 13)
            "appPackage": "com.tencent.mm",  # package of the app under test (WeChat)
            "appActivity": ".ui.LauncherUI",  # Activity launched on start
            "autoGrantPermissions": True,  # let Appium auto-grant base permissions,
            # has no effect when noReset is True (Android-only); value is True or False
            "unicodekeyboard": True,  # use Appium's IME so Chinese text can be typed
            "resetkeyboard": True,  # restore the original IME when done
            "noReset": True,  # keep the app state (do not reset WeChat)
            "recreateChromeDriverSessions": True,  # leaving a chromedriver context kills the session, so no manual kill is needed
            "printPageSourceOnFailure": True,  # appium log records the full page source when an element is not found
            "newCommandTimeout": 6000,  # idle-command timeout
            "automationName": "UiAutomator2",  # automation engine; default is Appium,
            # Appium / UiAutomator2 / Selendroid / Espresso are for Android, XCUITest for iOS
            "showChromedriverLog": True,
            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},  # webview process chromedriver attaches to
            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
            'enableWebviewDetailsCollection': True,
            'setWebContentsDebuggingEnabled': True,
            'chromedriverExecutable': chromedriverExecutable,
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(10)
        time.sleep(5)  # give WeChat time to finish launching
        return driver

    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, env, driver: WebDriver):
        """Scan scan_count items of the recommend feed; append rule-passing,
        non-duplicate videos to download_video_list. Quits the driver when the
        scan ends."""
        Common.logger(log_type, crawler).info("进入发现页")
        # Tap the "发现" (Discover) tab
        tabs = driver.find_elements(By.ID, "com.tencent.mm:id/f2s")
        for tab in tabs:
            if tab.text == "发现":
                tab.click()
                time.sleep(0.5)
                break

        # Tap the "视频号" (Channels) entry
        Common.logger(log_type, crawler).info('点击"视频号"')
        textviews = driver.find_elements(By.ID, "android:id/title")
        for textview in textviews:
            if textview.text == "视频号":
                textview.click()
                time.sleep(0.5)
                break

        # Dismiss the teen-mode dialog if it pops up
        Common.logger(log_type, crawler).info("尝试关闭青少年模式弹框\n")
        try:
            driver.find_element(By.ID, "com.tencent.mm:id/lqz").click()
        except NoSuchElementException:
            pass

        for i in range(cls.scan_count):
            Common.logger(log_type, crawler).info(f"第{i + 1}条视频")
            # Skip live rooms (identified by their resource id)
            if len(driver.find_elements(By.ID, "com.tencent.mm:id/dkf")) != 0:
                Common.logger(log_type, crawler).info("这是一个直播间,滑动至下一个视频\n")
                driver.swipe(10, 1600, 10, 300, 200)
                continue
            video_dict = cls.get_video_info(driver)
            for k, v in video_dict.items():
                Common.logger(log_type, crawler).info(f"{k}:{v}")

            # NOTE(review): get_video_info builds the title from element text,
            # which looks like it yields "" rather than None — confirm this
            # branch can ever trigger.
            if video_dict["video_title"] is None:
                Common.logger(log_type, crawler).info("无效视频")
            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                Common.logger(log_type, crawler).info("不满足抓取规则")
                # Common.logging(log_type, crawler, env, "不满足抓取规则\n")
            elif cls.repeat_out_video_id(log_type, crawler, video_dict["video_id"], env) != 0:
                Common.logger(log_type, crawler).info('视频已下载')
                # Common.logging(log_type, crawler, env, '视频已下载\n')
            else:
                cls.download_video_list.append(video_dict)
            if i+1 == cls.scan_count:
                Common.logger(log_type, crawler).info("扫描一轮结束\n")
                driver.quit()
                return
            # Swipe up to the next video in the feed
            Common.logger(log_type, crawler).info(f"已抓取符合规则视频{len(cls.download_video_list)}条,滑动至下一个视频\n")
            driver.swipe(10, 1600, 10, 300, 200)

    @classmethod
    def is_contain_chinese(cls, strword):
        """Return True if strword contains at least one CJK unified ideograph."""
        for ch in strword:
            if u'\u4e00' <= ch <= u'\u9fff':
                return True
        return False

    @classmethod
    def get_video_info(cls, driver: WebDriver):
        """Read the on-screen metadata of the currently shown feed video and
        return it as a dict; duration falls back to 0 when the pause overlay
        never shows the "mm:ss" label."""

        # Tap pause so the duration label becomes visible; retry up to 3 times.
        # NOTE(review): `global duration` makes the value module-level — a
        # plain local initialised to 0 would do; confirm no cross-call reuse
        # is intended.
        global duration
        for i in range(3):
            try:
                driver.find_element(By.ID, "com.tencent.mm:id/gpx").click()
                duration_str = driver.find_element(By.ID, "com.tencent.mm:id/l7i").text
                # "mm:ss" → seconds
                duration = int(duration_str.split(":")[0]) * 60 + int(duration_str.split(":")[-1])
                break
            except NoSuchElementException:
                duration = 0

        # Author name
        user_name = driver.find_element(By.ID, "com.tencent.mm:id/hft").text

        # Like count; "3.2万"/"万+" style strings are converted to ints,
        # placeholder labels count as 0
        like_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/k04').text
        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火":
            like_cnt = 0
        elif '万+' in like_cnt:
            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
        elif '万' in like_cnt:
            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
        else:
            like_cnt = int(float(like_cnt))

        # Share count
        share_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/jhv').text
        if share_cnt == "" or share_cnt == "转发":
            share_cnt = 0
        elif '万+' in share_cnt:
            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
        elif '万' in share_cnt:
            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
        else:
            share_cnt = int(float(share_cnt))

        # Favorite count
        favorite_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/fnp').text
        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火":
            favorite_cnt = 0
        elif '万+' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
        elif '万' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
        else:
            favorite_cnt = int(float(favorite_cnt))

        # Comment count; the element is kept so the comment panel can be opened
        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
        comment_cnt = comment_id.text
        if comment_cnt == "" or comment_cnt == "评论":
            comment_cnt = 0
        elif '万+' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
        elif '万' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
        else:
            comment_cnt = int(float(comment_cnt))

        # Open the comment panel (title and publish time live there)
        comment_id.click()
        time.sleep(1)

        # Title, newline-flattened and truncated to 40 chars
        title = driver.find_elements(By.ID, "com.tencent.mm:id/bga")[0].text.replace("\n", " ")[:40]

        # Publish time: normalise the relative / partial Chinese date formats
        # to "YYYY-MM-DD"
        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
            # seconds/minutes/hours ago → today
            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
        elif "天前" in publish_time:
            # "N天前" → N days ago
            days = int(publish_time.replace("天前", ""))
            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
        elif "年" in publish_time:
            # "YYYY年M月D日" → zero-pad month/day
            year_str = publish_time.split("年")[0]
            month_str = publish_time.split("年")[-1].split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
        else:
            # "M月D日" → assume the current year
            year_str = str(datetime.datetime.now().year)
            month_str = publish_time.split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))

        # Collapse the comment panel again
        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
        time.sleep(0.5)

        # Stable id derived from the title
        video_id = md5(title.encode('utf8')).hexdigest()
        video_dict = {
            "video_title": title,
            "video_id": video_id,
            "duration": duration,
            "user_name": user_name,
            "like_cnt": like_cnt,
            "share_cnt": share_cnt,
            "favorite_cnt": favorite_cnt,
            "comment_cnt": comment_cnt,
            "publish_time_str": publish_time_str,
            "publish_time_stamp": publish_time_stamp,
        }
        return video_dict

    @classmethod
    def get_recommend_list(cls, log_type, crawler, rule_dict, env):
        """Run one feed scan: start WeChat, collect candidates, quit WeChat."""
        driver = cls.start_wechat(log_type, crawler, env)
        cls.get_videoList(log_type=log_type,
                          crawler=crawler,
                          rule_dict=rule_dict,
                          env=env,
                          driver=driver)
        driver.quit()
        Common.logger(log_type, crawler).info(f"微信退出成功\n")
+
+
if __name__ == "__main__":
    # Manual dev run: scan the feed once with a strict rule set and print the
    # collected candidates.
    rule_dict1 = {"period": {"min": 365, "max": 365},
                 "duration": {"min": 10, "max": 1800},
                 "favorite_cnt": {"min": 50000, "max": 0},
                 "share_cnt": {"min": 10000, "max": 0}}
    ShipinhaoRecommend.get_recommend_list("recommend", "shipinhao", rule_dict1, "dev")
    print(ShipinhaoRecommend.download_video_list)
    pass

+ 117 - 0
xigua/xigua_main/run_xg_recommend2.py

@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/27
+import argparse
+import random
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from xigua.xigua_recommend.xg_recommend2 import XiguaRecommend
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起3秒,3秒内如果有消息可以消费则立即返回响应。
+    # 长轮询时间3秒(最多可设置为30秒)。
+    wait_seconds = 30
+    # 一次最多消费3条(最多可设置为16条)。
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
+    while True:
+        try:
+            # 长轮询消费消息。
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                xg_recommend_start_time = int(time.time())
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # 处理爬虫业务
+                task_dict = task_fun_mq(msg.message_body)['task_dict']
+                rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = []
+                for user in user_list:
+                    our_uid_list.append(user["uid"])
+                our_uid = random.choice(our_uid_list)
+                Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                Common.logger(log_type, crawler).info(f"共{len(user_list)}个用户:\n{user_list}\n")
+                Common.logging(log_type, crawler, env, f"共{len(user_list)}个用户:\n{user_list}\n")
+                Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+                XiguaRecommend.get_videoList(log_type=log_type,
+                                             crawler=crawler,
+                                             rule_dict=rule_dict,
+                                             our_uid=our_uid,
+                                             env=env)
+                # Common.del_logs(log_type, crawler)
+                Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xg_recommend_end_time = int(time.time())
+                xg_recommend_duration = xg_recommend_start_time - xg_recommend_end_time
+                Common.logger(log_type, crawler).info(f"duration {xg_recommend_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xg_recommend_duration}")
+        except MQExceptionBase as err:
+            # Topic中没有消息可消费。
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                continue
+
+            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    # CLI entry point: all runtime configuration comes from command-line flags.
+    parser = argparse.ArgumentParser()  ## create the argument parser
+    parser.add_argument('--log_type', type=str)  ## add argument with explicit type
+    parser.add_argument('--crawler')  ## crawler/platform name
+    parser.add_argument('--topic_name')  ## MQ topic to consume
+    parser.add_argument('--group_id')  ## MQ consumer group id
+    parser.add_argument('--env')  ## "dev" or "prod"
+    args = parser.parse_args()  ### parse values supplied on the command line
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)

+ 324 - 0
xigua/xigua_recommend/xg_recommend2.py

@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/10
+import base64
+import datetime
+import json
+import os
+import random
+import string
+import subprocess
+import sys
+import time
+import requests
+import urllib3
+import re
+from requests.adapters import HTTPAdapter
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.chrome.webdriver import WebDriver
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.feishu import Feishu
+from common.public import download_rule, get_config_from_mysql
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.userAgent import get_random_user_agent
+
+
+class XiguaRecommend:
+    """Scraper for the Xigua (ixigua.com) web recommend feed.
+
+    Drives a locally started Chrome via Selenium remote debugging to read the
+    recommend page, fetches per-video detail over HTTP, filters by rule/word
+    lists and dedup, and forwards accepted videos to the ETL MQ topic.
+    """
+
+    # Platform tag used in dedup SQL alongside the crawler name.
+    platform = "xigua"
+
+    @classmethod
+    def random_signature(cls):
+        """Build a fake `_signature` query-string value in Xigua's format.
+
+        Generates a shuffled 26-char alphanumeric sample, wraps it with the
+        fixed 'AAAAAAAAAA'…'AAAB' frame, then rewrites position 18 to one of
+        w/x/y/z depending on the character found there.
+        """
+        src_digits = string.digits  # digit characters
+        src_uppercase = string.ascii_uppercase  # uppercase letters
+        src_lowercase = string.ascii_lowercase  # lowercase letters
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
+        """Extract media URLs and dimensions from a `videoResource` payload.
+
+        Returns a dict with keys video_url, audio_url, video_width,
+        video_height. URLs arrive base64-encoded; non-base64 characters are
+        stripped before decoding.
+        """
+        video_url_dict = {}
+
+        video_resource = video_info.get('videoResource', {})
+        dash_120fps = video_resource.get('dash_120fps', {})
+        normal = video_resource.get('normal', {})
+
+        # Get the video_list dict from dash_120fps, falling back to normal.
+        video_list = dash_120fps.get('video_list', {}) or normal.get('video_list', {})
+        # Prefer the highest quality present (video_4 down to video_1); if a
+        # non-empty entry is found it is used, otherwise fields default empty.
+        video = video_list.get('video_4') or video_list.get('video_3') or video_list.get('video_2') or video_list.get('video_1')
+
+        video_url = video.get('backup_url_1', '') if video else ''
+        # NOTE(review): audio_url reads the same 'backup_url_1' as video_url —
+        # confirm whether a separate audio field was intended.
+        audio_url = video.get('backup_url_1', '') if video else ''
+        video_width = video.get('vwidth', 0) if video else 0
+        video_height = video.get('vheight', 0) if video else 0
+
+        video_url = re.sub(r'[^a-zA-Z0-9+/=]', '', video_url)  # strip non-base64 characters from the video URL
+        audio_url = re.sub(r'[^a-zA-Z0-9+/=]', '', audio_url)  # strip non-base64 characters from the audio URL
+
+        video_url = base64.b64decode(video_url).decode('utf8')  # decode the video URL
+        audio_url = base64.b64decode(audio_url).decode('utf8')  # decode the audio URL
+
+        video_url_dict["video_url"] = video_url
+        video_url_dict["audio_url"] = audio_url
+        video_url_dict["video_width"] = video_width
+        video_url_dict["video_height"] = video_height
+
+        return video_url_dict
+
+    @classmethod
+    def get_comment_cnt(cls, item_id):
+        """Fetch the comment count for `item_id` from the web comment API.
+
+        Returns 0 on any non-200 response or missing 'total_number' field.
+        NOTE(review): msToken / X-Bogus / cookie / anti-token values are
+        hard-coded captures and will likely expire — confirm refresh strategy.
+        """
+        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
+        params = {
+            "tab_index": "0",
+            "count": "10",
+            "offset": "10",
+            "group_id": str(item_id),
+            "item_id": str(item_id),
+            "aid": "1768",
+            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
+            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
+            "_signature": cls.random_signature(),
+        }
+        headers = {
+            'authority': 'www.ixigua.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+            'cache-control': 'no-cache',
+            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
+            'pragma': 'no-cache',
+            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
+            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
+            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry each request up to 3 times
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(),
+                         timeout=5)
+        response.close()
+        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
+            return 0
+        return response.json().get("total_number", 0)
+
+    # Fetch full video detail
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
+        """Fetch one video's metadata from the mixVideo information API.
+
+        Returns a normalized video_dict, or None when the response is bad or
+        carries an empty video payload.
+        NOTE(review): cls.get_video_url(video_info) is called three times
+        below (width, height, urls) — each call re-decodes the URLs; confirm
+        whether hoisting it is safe for a future change.
+        """
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry each request up to 3 times
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False,
+                         proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S",
+                                                  time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        """Return how many rows already exist for this out_video_id (dedup check)."""
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def quit(cls, log_type, crawler, env, driver: WebDriver):
+        """Quit the WebDriver session and kill any remaining Chrome processes."""
+        Common.logger(log_type, crawler).info("退出浏览器")
+        Common.logging(log_type, crawler, env, "退出浏览器")
+        driver.quit()
+        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+        os.system(quit_cmd)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        """Scrape the Xigua recommend page once and enqueue qualifying videos.
+
+        Launches Chrome with remote debugging, verifies login, walks the feed
+        cards, fetches detail per card, filters by download rules / filter
+        words / dedup, and sends survivors to the ETL MQ topic.
+        """
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        Common.logger(log_type, crawler).info("启动 Chrome 浏览器")
+        Common.logging(log_type, crawler, env, "启动 Chrome 浏览器")
+        # Kill all existing Chrome processes first.
+        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+        os.system(quit_cmd)
+        time.sleep(1)
+        # Launch Chrome with remote-debugging port 12306 (macOS `open`).
+        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
+        os.system(cmd)
+        # Enable performance logging in the requested capabilities.
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # Pick the chromedriver binary per environment.
+        if env == "dev":
+            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
+        else:
+            # chromedriver = "/usr/bin/chromedriver"
+            chromedriver = "/Users/piaoquan/Downloads/chromedrivers/chromedriver_v114/chromedriver"
+        # Browser options: attach to the already-running debugging Chrome.
+        browser = webdriver.ChromeOptions()
+        # browser.add_argument(f'--proxy-server={Common.tunnel_proxies()}')  # proxy IP address and port
+        browser.add_experimental_option("debuggerAddress", "127.0.0.1:12306")
+        # Initialize the driver.
+        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        Common.logger(log_type, crawler).info("打开西瓜推荐页")
+        Common.logging(log_type, crawler, env, "打开西瓜推荐页")
+        driver.get(f"https://www.ixigua.com/")
+        time.sleep(2)
+
+        # Check login state: a missing avatar element means the session expired.
+        if len(driver.find_elements(By.XPATH, '//*[@class="BU-Component-Header-Avatar__image"]')) == 0:
+            Common.logger(log_type, crawler).info("登录失效")
+            Common.logging(log_type, crawler, env, "登录失效")
+            driver.get_screenshot_as_file(f"./{crawler}/photos/logon_err.png")
+            # Session expired: alert via Feishu bot (only during 10:00-20:00).
+            if 20 >= datetime.datetime.now().hour >= 10:
+                Feishu.bot(log_type, crawler, "西瓜推荐,登录失效")
+            return
+
+        videoList_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard HorizontalChannelBlockList__item"]')
+        if len(videoList_elements) == 0:
+            Common.logger(log_type, crawler).info("到底啦~~~~~~~~~~\n")
+            Common.logging(log_type, crawler, env, "到底啦~~~~~~~~~~\n")
+            cls.quit(log_type, crawler, env, driver)
+            return
+        for i, video_element in enumerate(videoList_elements):
+            Common.logger(log_type, crawler).info(f"正在抓取第{i+1}条视频")
+            Common.logging(log_type, crawler, env, f"正在抓取第{i+1}条视频")
+            # NOTE(review): this XPath is evaluated against the whole document
+            # and indexed by i, not scoped to video_element — confirm intended.
+            item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[i].get_attribute("href")
+            item_id = item_id.replace("https://www.ixigua.com/", "").replace("?&", "")
+            Common.logger(log_type, crawler).info(f"item_id:{item_id}")
+            video_dict = cls.get_video_info(log_type, crawler, item_id)
+            if video_dict is None:
+                Common.logger(log_type, crawler).info("无效视频\n")
+                Common.logging(log_type, crawler, env, "无效视频\n")
+                continue
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            Common.logging(log_type, crawler, env, f"{video_dict}")
+
+            if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+            elif any(str(word) if str(word) in video_dict["video_title"] else False
+                     for word in get_config_from_mysql(log_type=log_type,
+                                                       source=crawler,
+                                                       env=env,
+                                                       text="filter",
+                                                       action="")) is True:
+                Common.logger(log_type, crawler).info('已中过滤词\n')
+                Common.logging(log_type, crawler, env, '已中过滤词\n')
+            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+                Common.logging(log_type, crawler, env, '视频已下载\n')
+            else:
+                # Normalize field names expected by the ETL consumer.
+                video_dict["out_user_id"] = video_dict["user_id"]
+                video_dict["platform"] = crawler
+                video_dict["strategy"] = log_type
+                video_dict["out_video_id"] = video_dict["video_id"]
+                video_dict["width"] = video_dict["video_width"]
+                video_dict["height"] = video_dict["video_height"]
+                video_dict["crawler_rule"] = json.dumps(rule_dict)
+                video_dict["user_id"] = our_uid
+                video_dict["publish_time"] = video_dict["publish_time_str"]
+                video_dict["strategy_type"] = log_type
+                mq.send_msg(video_dict)
+        cls.quit(log_type, crawler, env, driver)
+
+
+if __name__ == "__main__":
+    # Local smoke test: one recommend-feed round against the dev environment.
+    XiguaRecommend.get_videoList(log_type="recommend",
+                                 crawler="xigua",
+                                 our_uid=6267140,
+                                 rule_dict={},
+                                 env="dev")
+    pass