
update haitunzhufu

wangkun · 1 year ago
commit d73743d5a6

+ 9 - 68
dev/dev_script/mitmproxy_test.py

@@ -1,80 +1,21 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/7/24
-import os
-import time
-
-from mitmproxy import http, proxy, options, proxyconfig
-from mitmproxy.proxy.config import ProxyConfig
-from mitmproxy.tools.dump import DumpMaster
-from selenium import webdriver
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
+from mitmproxy import ctx
 
 
 
 
 class ProxyData:
-    requests_data = []
-    response_data = []
-
-    @classmethod
-    def start_proxy(cls):
-        # Create the proxy configuration options
-        opts = options.Options(listen_host='0.0.0.0', listen_port=8888)
-
-        # Create the proxy configuration
-        config = ProxyConfig(opts)
-
-        # Create a DumpMaster instance
-        master = DumpMaster(opts)
-        master.server = config
-
-        # Start the proxy
-        print("Proxy started")
-        master.run()
+    def __init__(self):
+        self.num = 0
 
 
-    @classmethod
-    def intercept_request(cls, flow: http.HTTPFlow):
-        # Intercept the request
-        request_data = {
-            'url': flow.request.url,
-            'method': flow.request.method,
-            'headers': dict(flow.request.headers),
-            'content': flow.request.content.decode('utf-8')
-        }
-        cls.requests_data.append(request_data)
+    def request(self, flow):
+        self.num = self.num + 1
+        ctx.log.info("We've seen %d flows" % self.num)
 
 
-    @classmethod
-    def intercept_response(cls, flow: http.HTTPFlow):
-        # Intercept the response
-        response_data = {
-            'url': flow.request.url,
-            'status_code': flow.response.status_code,
-            'headers': dict(flow.response.headers),
-            'content': flow.response.content.decode('utf-8')
-        }
-        cls.response_data.append(response_data)
 
 
-    @classmethod
-    def start_selenium(cls):
-        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
-        os.system(quit_cmd)
-        time.sleep(1)
-        # Launch Chrome with remote debugging on port 8888
-        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=8888'
-        os.system(cmd)
-        # Enable performance (request) logging
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-        # Configure chromedriver
-        chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
-        # Initialize the browser options
-        browser = webdriver.ChromeOptions()
-        browser.add_experimental_option("debuggerAddress", "127.0.0.1:8888")
-        # Initialize the driver
-        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
-        driver.implicitly_wait(10)
-        print("打开抖音推荐页")
-        driver.get(f"https://www.douyin.com/")
+addons = [
+    ProxyData()
+]
 
 
 
 
 if __name__ == "__main__":
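The rewrite above replaces the hand-rolled DumpMaster/ProxyConfig setup (tied to older mitmproxy internals) with the addon style, where the script is loaded by mitmdump instead of starting its own proxy. If the dropped request/response capture is still wanted, here is a minimal sketch of how it could be carried over to addon hooks; the class and field names are illustrative and not part of this commit.

# Minimal sketch, not part of the commit. Load it with, for example:
#   mitmdump -s dev/dev_script/mitmproxy_test.py -p 8888
from mitmproxy import ctx, http


class CaptureData:
    def __init__(self):
        self.requests_data = []
        self.response_data = []

    def request(self, flow: http.HTTPFlow):
        # Called for every client request passing through the proxy.
        self.requests_data.append({
            "url": flow.request.url,
            "method": flow.request.method,
            "headers": dict(flow.request.headers),
            "content": (flow.request.content or b"").decode("utf-8", errors="replace"),
        })

    def response(self, flow: http.HTTPFlow):
        # Called once the upstream response has arrived (bodies skipped, they may be binary).
        self.response_data.append({
            "url": flow.request.url,
            "status_code": flow.response.status_code,
            "headers": dict(flow.response.headers),
        })
        ctx.log.info("captured %d responses" % len(self.response_data))


addons = [CaptureData()]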

+ 1 - 2
haitunzhufu/haitunzhufu_recommend/haitunzhufu_recommend2.py

@@ -30,7 +30,6 @@ class HTZFRecommend:
     def today_download_cnt(cls, log_type, crawler, env):
         select_sql = """ SELECT COUNT(*) FROM crawler_video WHERE platform IN ("haitunzhufu", "海豚祝福") AND DATE(create_time) = CURDATE(); """
         today_download_cnt = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")[0]['COUNT(*)']
-        # print(today_download_cnt)
         return today_download_cnt
 
 
     @classmethod
@@ -42,7 +41,7 @@ class HTZFRecommend:
         Common.logger(log_type, crawler).info("启动微信")
         caps = {
             "platformName": "Android",
-            "platformVersion": "12",
+            "platformVersion": "11",
             "devicesName": "Android",
             "appPackage": "com.tencent.mm",
             "appActivity": ".ui.LauncherUI",

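For context, this capabilities dict is handed to Appium when the WeChat session is created, so platformVersion simply has to match the Android version of the device driving the crawl. A trimmed-down sketch of how such a dict is consumed is shown below; the values and the default server URL are assumptions mirroring the rest of the repo, not part of this commit.

from appium import webdriver

# Illustrative capabilities; assumes an Appium server on its default port and an
# Appium-Python-Client version that still accepts a plain capabilities dict.
caps = {
    "platformName": "Android",
    "platformVersion": "11",
    "appPackage": "com.tencent.mm",
    "appActivity": ".ui.LauncherUI",
    "noReset": True,
}
driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
driver.implicitly_wait(30)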
+ 6 - 4
zhufuquanzi/zhufuquanzi_main/run_zfqz_dev.py

@@ -6,7 +6,7 @@ import sys
 
 
 sys.path.append(os.getcwd())
 from common.common import Common
-from zhufuquanzi.zhufuquanzi_recommend.zhufuquanzi_recommend import ZFQZRecommend
+from zhufuquanzi.zhufuquanzi_recommend.zhufuquanzi_recommend2 import ZFQZRecommend
 
 
 
 
 class ZFQZRecommendMain:
@@ -16,9 +16,11 @@ class ZFQZRecommendMain:
         Common.logging(log_type, crawler, env, '开始抓取"祝福圈子"推荐')
         rule_dict = {"period": {"min": 365, "max": 365},
                      "duration": {"min": 40, "max": 2400},
-                     "play_cnt": {"min": 100000, "max": 0},
-                     "videos_cnt": {"min": 10, "max": 20},
-                     "like_cnt": {"min": 1000, "max": 0}}
+                     # "play_cnt": {"min": 100000, "max": 0},
+                     "play_cnt": {"min": 1, "max": 0},
+                     "videos_cnt": {"min": 10, "max": 0},
+                     # "like_cnt": {"min": 1000, "max": 0}}
+                     "like_cnt": {"min": 0, "max": 0}}
         ZFQZRecommend.start_wechat(log_type=log_type,
                                    crawler=crawler,
                                    rule_dict=rule_dict,
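The dev rule set above effectively disables the play-count and like-count thresholds while keeping a per-run video cap. The real semantics live in common.public.download_rule, whose implementation is not part of this diff; the tiny checker below only illustrates the convention these values suggest, namely that "max": 0 means "no upper bound" (an assumption, not the repo's code).

def within_bound(value: int, bound: dict) -> bool:
    # Assumed convention: {"min": m, "max": 0} means "at least m, no upper limit".
    if value < bound.get("min", 0):
        return False
    return bound.get("max", 0) == 0 or value <= bound["max"]


# Under the relaxed dev rules a video with a single play now passes the play_cnt check,
# while the old production threshold would have rejected it.
assert within_bound(1, {"min": 1, "max": 0})
assert not within_bound(50, {"min": 100000, "max": 0})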

+ 302 - 0
zhufuquanzi/zhufuquanzi_recommend/zhufuquanzi_recommend2.py

@@ -0,0 +1,302 @@
+# -*- coding: utf-8 -*-
+# @Author: wang
+# @Time: 2023/9/6
+import json
+import os
+import sys
+import time
+from hashlib import md5
+
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from bs4 import BeautifulSoup
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.mq import MQ
+from common.public import download_rule, get_config_from_mysql
+from common.scheduling_db import MysqlHelper
+
+
+class ZFQZRecommend:
+    platform = "祝福圈子"
+    download_cnt = 0
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
+        if env == "dev":
+            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver"
+        else:
+            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver"
+
+        Common.logger(log_type, crawler).info("启动微信")
+        Common.logging(log_type, crawler, env, '启动微信')
+        caps = {
+            "platformName": "Android",
+            "devicesName": "Android",
+            "platformVersion": "7",
+            # "udid": "emulator-5554",
+            "appPackage": "com.tencent.mm",
+            "appActivity": ".ui.LauncherUI",
+            "autoGrantPermissions": "true",
+            "noReset": True,
+            "resetkeyboard": True,
+            "unicodekeyboard": True,
+            "showChromedriverLog": True,
+            "printPageSourceOnFailure": True,
+            "recreateChromeDriverSessions": True,
+            "enableWebviewDetailsCollection": True,
+            "setWebContentsDebuggingEnabled": True,
+            "newCommandTimeout": 6000,
+            "automationName": "UiAutomator2",
+            "chromedriverExecutable": chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+        }
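+        # The session is created against a local Appium server (default port 4723) using the caps above.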
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(30)
+
+        for i in range(120):
+            try:
+                if driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
+                    Common.logger(log_type, crawler).info("微信启动成功")
+                    Common.logging(log_type, crawler, env, '微信启动成功')
+                    break
+                elif driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
+                    Common.logger(log_type, crawler).info("发现并关闭系统下拉菜单")
+                    Common.logging(log_type, crawler, env, '发现并关闭系统下拉菜单')
+                    driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
+                else:
+                    pass
+            except NoSuchElementException:
+                time.sleep(1)
+
+        Common.logger(log_type, crawler).info("下滑,展示小程序选择面板")
+        Common.logging(log_type, crawler, env, '下滑,展示小程序选择面板')
+        size = driver.get_window_size()
+        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                     int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        time.sleep(5)
+        Common.logger(log_type, crawler).info('打开小程序"祝福圈子"')
+        Common.logging(log_type, crawler, env, '打开小程序"祝福圈子"')
+        driver.find_elements(By.XPATH, '//*[@text="祝福圈子"]')[-1].click()
+        time.sleep(10)
+
+        cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
+
+        time.sleep(3)
+        driver.quit()
+
+    @classmethod
+    def search_elements(cls, driver: WebDriver, xpath):
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    @classmethod
+    def check_to_applet(cls, log_type, crawler, env, driver: WebDriver, xpath):
+        time.sleep(1)
+        webViews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webViews:{webViews}")
+        Common.logging(log_type, crawler, env, f"webViews:{webViews}")
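+        # contexts[0] is the native app; index 1 is assumed to be the applet's WebView context.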
+        driver.switch_to.context(webViews[1])
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                driver.find_element(By.XPATH, xpath)
+                Common.logger(log_type, crawler).info("切换到小程序成功\n")
+                Common.logging(log_type, crawler, env, '切换到小程序成功\n')
+                return
+            except NoSuchElementException:
+                time.sleep(1)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def swipe_up(cls, driver: WebDriver):
+        cls.search_elements(driver, '//*[@class="bless--list"]')
+        size = driver.get_window_size()
+        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
+                     int(size["width"] * 0.5), int(size["height"] * 0.4), 200)
+
+    @classmethod
+    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_title_element):
+        for i in range(3):
+            cls.search_elements(driver, '//*[@class="bless--list"]')
+            Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
+            Common.logger(log_type, crawler).info("点击标题")
+            video_title_element[0].click()
+            # driver.execute_script("arguments[0].click()", video_title_element[0])
+            Common.logger(log_type, crawler).info("点击标题完成")
+            time.sleep(5)
+            video_url_elements = cls.search_elements(driver, '//*[@class="index--video-item index--video"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, driver: WebDriver, env, rule_dict, our_uid):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        driver.implicitly_wait(20)
+        cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver,
+                            xpath='//*[@class="tags--tag tags--tag-0 tags--checked"]')
+        time.sleep(3)
+
+        page = 0
+        while True:
+            Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页")
+            Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页")
+            if cls.search_elements(driver, '//*[@class="bless--list"]') is None:
+                Common.logger(log_type, crawler).info("窗口已销毁\n")
+                Common.logging(log_type, crawler, env, '窗口已销毁\n')
+                return
+
+            cls.swipe_up(driver)
+
+            page_source = driver.page_source
+            soup = BeautifulSoup(page_source, 'html.parser')
+            soup.prettify()
+
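+            # The applet UI is rendered as wx-* custom elements inside the WebView, hence the HTML parse.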
+            video_list_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
+            Common.logger(log_type, crawler).info(f"第{page + 1}页共:{len(video_list_elements)}条视频\n")
+            Common.logging(log_type, crawler, env, f"第{page + 1}页共:{len(video_list_elements)}条视频\n")
+
+            for i, video_element in enumerate(video_list_elements):
+                try:
+                    if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
+                        Common.logger(log_type, crawler).info(f"本轮已抓取视频数:{cls.download_cnt}")
+                        Common.logging(log_type, crawler, env, f"本轮已抓取视频数:{cls.download_cnt}")
+                        cls.download_cnt = 0
+                        return
+
+                    Common.logger(log_type, crawler).info(f"第{i + 1}条视频")
+                    Common.logging(log_type, crawler, env, f"第{i + 1}条视频")
+
+                    video_title = video_element.find("wx-view", class_="dynamic--title").text
+                    play_str = video_element.find("wx-view", class_="dynamic--views").text
+                    like_str = video_element.findAll("wx-view", class_="dynamic--commerce-btn-text")[0].text
+                    comment_str = video_element.findAll("wx-view", class_="dynamic--commerce-btn-text")[1].text
+                    duration_str = video_element.find("wx-view", class_="dynamic--duration").text
+                    user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
+                    avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
+                    cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
+
+                    play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
+                    duration = int(duration_str.split(":")[0].strip()) * 60 + int(duration_str.split(":")[-1].strip())
+                    if "点赞" in like_str:
+                        like_cnt = 0
+                    elif "万" in like_str:
+                        like_cnt = int(float(like_str.split("万")[0]) * 10000)
+                    else:
+                        like_cnt = int(like_str)
+                    if "评论" in comment_str:
+                        comment_cnt = 0
+                    elif "万" in comment_str:
+                        comment_cnt = int(float(comment_str.split("万")[0]) * 10000)
+                    else:
+                        comment_cnt = int(comment_str)
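+                    # md5 of the title / nickname gives deterministic ids, later used for dedup in repeat_video.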
+                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
+                    out_user_id = md5(user_name.encode('utf8')).hexdigest()
+
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": out_video_id,
+                        "duration_str": duration_str,
+                        "duration": duration,
+                        "play_str": play_str,
+                        "play_cnt": play_cnt,
+                        "like_str": like_str,
+                        "like_cnt": like_cnt,
+                        "comment_cnt": comment_cnt,
+                        "share_cnt": 0,
+                        "user_name": user_name,
+                        "user_id": out_user_id,
+                        'publish_time_stamp': int(time.time()),
+                        'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                        "avatar_url": avatar_url,
+                        "cover_url": cover_url,
+                        "session": f"zhufuquanzi-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+                    # Common.logger(log_type, crawler).info(f"==========分割线==========\n")
+
+                    if video_title is None or cover_url is None:
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, '无效视频\n')
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
+                                       rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                    elif any(str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
+                    else:
+                        video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
+                        if video_title_element is None:
+                            Common.logger(log_type, crawler).warning(f"未找到该视频标题的element:{video_title_element}")
+                            Common.logging(log_type, crawler, env, f"未找到该视频标题的element:{video_title_element}")
+                            continue
+                        Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
+                        Common.logging(log_type, crawler, env, "点击标题,进入视频详情页")
+                        video_url = cls.get_video_url(log_type, crawler, driver, video_title_element)
+                        if video_url is None:
+                            Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
+                            driver.press_keycode(AndroidKey.BACK)
+                            time.sleep(5)
+                            continue
+                        video_dict['video_url'] = video_url
+                        Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
+
+                        video_dict["platform"] = crawler
+                        video_dict["strategy"] = log_type
+                        video_dict["out_video_id"] = video_dict["video_id"]
+                        video_dict["crawler_rule"] = json.dumps(rule_dict)
+                        video_dict["user_id"] = our_uid
+                        video_dict["publish_time"] = video_dict["publish_time_str"]
+                        mq.send_msg(video_dict)
+                        cls.download_cnt += 1
+                        driver.press_keycode(AndroidKey.BACK)
+                        Common.logger(log_type, crawler).info("符合抓取条件,mq send msg 成功\n")
+                        Common.logging(log_type, crawler, env, "符合抓取条件,ACK MQ 成功\n")
+                        time.sleep(5)
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+            Common.logger(log_type, crawler).info("已抓取完一组,休眠 5 秒\n")
+            Common.logging(log_type, crawler, env, "已抓取完一组,休眠 5 秒\n")
+            time.sleep(5)
+            page += 1
+
+
+if __name__ == "__main__":
+    rule_dict1 = {"period": {"min": 365, "max": 365},
+                  "duration": {"min": 30, "max": 1800},
+                  "favorite_cnt": {"min": 5000, "max": 0},
+                  "videos_cnt": {"min": 10, "max": 20},
+                  "share_cnt": {"min": 1000, "max": 0}}
+    ZFQZRecommend.start_wechat("recommend", "zhufuquanzi", "dev", rule_dict1, 6267141)
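Running this module directly assumes an Appium server at http://localhost:4723/wd/hub, an Android device or emulator with WeChat installed, and a chromedriver binary matching the applet's WebView (paths above). For reference, the string munging inside get_videoList is pulled out below as a standalone sketch; the helper names are illustrative, not part of the commit.

def parse_duration(raw: str) -> int:
    # "03:27" -> 207 seconds, mirroring the duration_str handling above.
    minutes, seconds = raw.split(":")
    return int(minutes.strip()) * 60 + int(seconds.strip())


def parse_count(raw: str, empty_marker: str) -> int:
    # "点赞" / "评论" act as zero placeholders; "1.2万" means 12,000.
    if empty_marker in raw:
        return 0
    if "万" in raw:
        return int(float(raw.split("万")[0]) * 10000)
    return int(raw)


assert parse_duration("03:27") == 207
assert parse_count("点赞", "点赞") == 0
assert parse_count("1.2万", "点赞") == 12000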