Browse Source

Merge remote-tracking branch 'origin/master'

crawler 1 year ago
parent
commit
e7b1005aa5

+ 80 - 6
fuqiwang/fuqiwang_main/run_fqw_recommend.py

@@ -6,14 +6,14 @@ from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_consumer import *
 from mq_http_sdk.mq_exception import MQExceptionBase
 
-
 sys.path.append(os.getcwd())
 from fuqiwang.fuqiwang_recommend.fqw_recommend import FqwRecommend
-
+from common.aliyun_log import AliyunLogger
 from common.common import Common
 from common.public import get_consumer, ack_message, task_fun_mq
 from common.scheduling_db import MysqlHelper
 
+
 def main(log_type, crawler, topic_name, group_id, env):
     consumer = get_consumer(topic_name, group_id)
     # Long polling: if the Topic has no messages, the client request is held on the server for 3 seconds; if a message becomes consumable within that window, a response is returned immediately.
@@ -29,6 +29,15 @@ def main(log_type, crawler, topic_name, group_id, env):
                                            f'WaitSeconds:{wait_seconds}\n'
                                            f'TopicName:{topic_name}\n'
                                            f'MQConsumer:{group_id}')
+    AliyunLogger.logging(
+        code="1000",
+        platform=crawler,
+        mode=log_type,
+        env=env,
+        message=f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                f'WaitSeconds:{wait_seconds}\n'
+                f'TopicName:{topic_name}\n'
+                f'MQConsumer:{group_id}')
     while True:
         try:
             # Consume messages via long polling.
@@ -54,6 +63,22 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                        f"NextConsumeTime:{msg.next_consume_time}\n"
                                                        f"ReceiptHandle:{msg.receipt_handle}\n"
                                                        f"Properties:{msg.properties}")
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"Receive\n"
+                            f"MessageId:{msg.message_id}\n"
+                            f"MessageBodyMD5:{msg.message_body_md5}\n"
+                            f"MessageTag:{msg.message_tag}\n"
+                            f"ConsumedTimes:{msg.consumed_times}\n"
+                            f"PublishTime:{msg.publish_time}\n"
+                            f"Body:{msg.message_body}\n"
+                            f"NextConsumeTime:{msg.next_consume_time}\n"
+                            f"ReceiptHandle:{msg.receipt_handle}\n"
+                            f"Properties:{msg.properties}"
+                )
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -69,29 +94,78 @@ def main(log_type, crawler, topic_name, group_id, env):
                 our_uid = random.choice(our_uid_list)
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
                 Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"调度任务:{task_dict}"
+                )
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
                 Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"抓取规则:{rule_dict}"
+                )
                 Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"用户列表:{user_list}\n"
+                )
                 Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
                 Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f'开始抓取:{task_dict["taskName"]}\n'
+                )
                 FqwRecommend.get_videoList(log_type=log_type,
-                                                crawler=crawler,
-                                                rule_dict=rule_dict,
-                                                our_uid=our_uid,
-                                                env=env)
+                                           crawler=crawler,
+                                           rule_dict=rule_dict,
+                                           our_uid=our_uid,
+                                           env=env)
                 Common.del_charles_files(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message='抓取一轮结束\n'
+                )
 
         except MQExceptionBase as err:
             # No messages available to consume in the Topic.
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
                 Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                AliyunLogger.logging(
+                    code="2000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"No new message! RequestId:{err.req_id}\n"
+                )
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
             Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            AliyunLogger.logging(
+                code="2000",
+                platform=crawler,
+                mode=log_type,
+                env=env,
+                message=f"Consume Message Fail! Exception:{err}\n"
+            )
             time.sleep(2)
             continue
 

+ 94 - 31
fuqiwang/fuqiwang_recommend/fqw_recommend.py

@@ -2,17 +2,21 @@ import json
 import random
 import re
 import time
+import uuid
 import requests
 
 from common.common import Common
 from common.scheduling_db import MysqlHelper
 from common.mq import MQ
+from common.aliyun_log import AliyunLogger
+from common.pipeline import PiaoQuanPipeline
 from common.public import download_rule, get_config_from_mysql
 
 proxies = {"http": None, "https": None}
 
+
 class FqwRecommend:
-    platform = ("福气旺")
+    platform = "福气旺"
     download_cnt = 0
     element_list = []
     i = 0
@@ -32,6 +36,13 @@ class FqwRecommend:
             try:
                 Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
                 Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
+                AliyunLogger.logging(
+                    code="2000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"正在抓取第{page}页"
+                )
                 url = "https://api.xinghetime.com/luckvideo/video/getRecommendVideos"
                 payload = json.dumps({
                     "baseParam": {
@@ -54,14 +65,35 @@ class FqwRecommend:
                 if "data" not in r.text or r.status_code != 200:
                     Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                     Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
+                    AliyunLogger.logging(
+                        code="2000",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message=f"get_videoList:{r.text}\n"
+                    )
                     return
                 elif "data" not in r.json():
                     Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
                     Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
+                    AliyunLogger.logging(
+                        code="2000",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message=f"get_videoList:{r.json()}\n"
+                    )
                     return
                 elif len(r.json()["data"]) == 0:
                     Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
                     Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
+                    AliyunLogger.logging(
+                        code="2000",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message=f"get_videoList:{r.json()}\n"
+                    )
                     return
                 else:
                     # Video list
@@ -74,6 +106,15 @@ class FqwRecommend:
                                 cls.element_list = []
                                 return
                             cls.i += 1
+                            trace_id = crawler + str(uuid.uuid1())
+                            AliyunLogger.logging(
+                                code="1001",
+                                platform=crawler,
+                                mode=log_type,
+                                env=env,
+                                message="扫描到一条视频",
+                                trace_id=trace_id
+                            )
                             video_title = feeds[i].get("title", "").strip().replace("\n", "") \
                                 .replace("/", "").replace("\\", "").replace("\r", "") \
                                 .replace(":", "").replace("*", "").replace("?", "") \
@@ -90,7 +131,6 @@ class FqwRecommend:
                                 result = number[0]
                             else:
                                 result = 0
-
                             time_str = feeds[i]["durationFormat"]
                             minutes, seconds = map(int, time_str.split(':'))
                             # Total duration in seconds
@@ -100,6 +140,7 @@ class FqwRecommend:
                                 "video_id": str(feeds[i]["videoId"]),  # 视频id
                                 "publish_time_stamp": publish_time_stamp,
                                 "publish_time_str": publish_time_str,
+                                "update_time_stamp": int(time.time()),
                                 "category_id": int(feeds[i].get("category_id", 0)),  # 视频来源(精彩推荐)
                                 "cover_url": feeds[i].get("coverImagePath", ""),  # 视频封面
                                 "video_url": feeds[i]["videoPath"],  # 视频链接
@@ -119,38 +160,48 @@ class FqwRecommend:
                             for k, v in video_dict.items():
                                 Common.logger(log_type, crawler).info(f"{k}:{v}")
                             Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
-
+                            video_dict["out_user_id"] = video_dict["user_id"]
+                            video_dict["platform"] = crawler
+                            video_dict["strategy"] = log_type
+                            video_dict["out_video_id"] = video_dict["video_id"]
+                            video_dict["width"] = video_dict["video_width"]
+                            video_dict["height"] = video_dict["video_height"]
+                            video_dict["crawler_rule"] = json.dumps(rule_dict)
+                            video_dict["user_id"] = our_uid
+                            video_dict["publish_time"] = video_dict["publish_time_str"]
                             if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict[
                                 "video_url"] == "":
                                 Common.logger(log_type, crawler).info("无效视频\n")
                                 Common.logging(log_type, crawler, env, "无效视频\n")
-                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
-                                               rule_dict=rule_dict) is False:
-                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
-                            elif any(str(word) if str(word) in video_dict["video_title"] else False
-                                     for word in get_config_from_mysql(log_type=log_type,
-                                                                       source=crawler,
-                                                                       env=env,
-                                                                       text="filter",
-                                                                       action="")) is True:
-                                Common.logger(log_type, crawler).info('已中过滤词\n')
-                                Common.logging(log_type, crawler, env, '已中过滤词\n')
-                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
-                                Common.logger(log_type, crawler).info('视频已下载\n')
-                                Common.logging(log_type, crawler, env, '视频已下载\n')
-
-                            else:
-                                video_dict["out_user_id"] = video_dict["user_id"]
-                                video_dict["platform"] = crawler
-                                video_dict["strategy"] = log_type
-                                video_dict["out_video_id"] = video_dict["video_id"]
-                                video_dict["width"] = video_dict["video_width"]
-                                video_dict["height"] = video_dict["video_height"]
-                                video_dict["crawler_rule"] = json.dumps(rule_dict)
-                                video_dict["user_id"] = our_uid
-                                video_dict["publish_time"] = video_dict["publish_time_str"]
+                                AliyunLogger.logging(
+                                    code="2005",
+                                    platform=crawler,
+                                    mode=log_type,
+                                    env=env,
+                                    message="无效视频",
+                                    data=video_dict,
+                                    trace_id=trace_id
+                                )
+                                continue
+                            pipeline = PiaoQuanPipeline(
+                                platform=crawler,
+                                mode=log_type,
+                                trace_id=trace_id,
+                                item=video_dict,
+                                rule_dict=rule_dict,
+                                env=env
+                            )
+                            if pipeline.process_item():
                                 mq.send_msg(video_dict)
+                                AliyunLogger.logging(
+                                    code="1002",
+                                    platform=crawler,
+                                    mode=log_type,
+                                    env=env,
+                                    trace_id=trace_id,
+                                    data=video_dict,
+                                    message="成功发送至ETL"
+                                )
                                 cls.download_cnt += 1
                                 interval = random.randrange(5, 11)
                                 time.sleep(interval)
@@ -158,12 +209,24 @@ class FqwRecommend:
                         except Exception as e:
                             Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                             Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+                            AliyunLogger.logging(
+                                code="3000",
+                                platform=crawler,
+                                mode=log_type,
+                                env=env,
+                                message=f"抓取单条视频异常:{e}\n"
+                            )
                 page += 1
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
                 Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
-
-
+                AliyunLogger.logging(
+                    code="3000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"抓取第{page}页时异常:{e}\n"
+                )
 
 
 if __name__ == "__main__":
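
Note on this file: the refactor replaces the inline elif validation chain (download_rule, title filter words, repeat_video dedup) with a single PiaoQuanPipeline.process_item() gate, and threads a per-item trace_id through the scan ("1001"), invalid ("2005") and sent-to-ETL ("1002") log events. common/pipeline.py is not shown in this commit, so the sketch below only illustrates the assumed per-item flow:

# Sketch of the per-item flow after this refactor. process_item() is assumed
# to bundle the checks the old elif chain performed (download_rule, filter
# words, dedup); the actual common/pipeline.py may differ.
import uuid

from common.pipeline import PiaoQuanPipeline  # import added by this commit

def handle_item(video_dict, rule_dict, crawler, log_type, env, mq):
    trace_id = crawler + str(uuid.uuid1())  # same trace-id scheme as the diff
    pipeline = PiaoQuanPipeline(
        platform=crawler,
        mode=log_type,
        trace_id=trace_id,
        item=video_dict,
        rule_dict=rule_dict,
        env=env,
    )
    if pipeline.process_item():   # all checks passed
        mq.send_msg(video_dict)   # forward to the ETL topic (logged as "1002")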

+ 339 - 0
xiaoniangaoplus/xiaoniangaoplus/xiaoniangao_plus_get_userid.py

@@ -0,0 +1,339 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/11/14
+import json
+import os
+import random
+import sys
+import time
+from datetime import date, timedelta
+
+import requests
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from bs4 import BeautifulSoup
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.common.by import By
+import multiprocessing
+
+from common.public import clean_title, get_config_from_mysql
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.mq import MQ
+from common.scheduling_db import MysqlHelper
+
+
+def get_redirect_url(url):
+    res = requests.get(url, allow_redirects=False)
+    if res.status_code == 302 or res.status_code == 301:
+        return res.headers['Location']
+    else:
+        return url
+
+class XiaoNianGaoPlusRecommend:
+    env = None
+    driver = None
+    log_type = None
+
+    def __init__(self, log_type, crawler, env, rule_dict, our_uid):
+        self.mq = None
+        self.platform = "小年糕"
+        self.download_cnt = 0
+        self.element_list = []
+        self.count = 0
+        self.swipe_count = 0
+        self.log_type = log_type
+        self.crawler = crawler
+        self.env = env
+        self.rule_dict = rule_dict
+        self.our_uid = our_uid
+        if self.env == "dev":
+            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_V111/chromedriver"
+        else:
+            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
+
+        Common.logger(self.log_type, self.crawler).info("启动微信")
+        # WeChat Appium capabilities
+        caps = {
+            "platformName": "Android",
+            "devicesName": "Android",
+            "appPackage": "com.tencent.mm",
+            "appActivity": ".ui.LauncherUI",
+            "autoGrantPermissions": "true",
+            "noReset": True,
+            "resetkeyboard": True,
+            "unicodekeyboard": True,
+            "showChromedriverLog": True,
+            "printPageSourceOnFailure": True,
+            "recreateChromeDriverSessions": True,
+            "enableWebviewDetailsCollection": True,
+            "setWebContentsDebuggingEnabled": True,
+            "newCommandTimeout": 6000,
+            "automationName": "UiAutomator2",
+            "chromedriverExecutable": chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+        }
+        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        self.driver.implicitly_wait(30)
+
+        for i in range(120):
+            try:
+                if self.driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
+                    Common.logger(self.log_type, self.crawler).info("微信启动成功")
+                    break
+                elif self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
+                    Common.logger(self.log_type, self.crawler).info("发现并关闭系统下拉菜单")
+                    self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
+                else:
+                    pass
+            except NoSuchElementException:
+                time.sleep(1)
+
+        Common.logger(self.log_type, self.crawler).info("下滑,展示小程序选择面板")
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                          int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        time.sleep(1)
+        Common.logger(self.log_type, self.crawler).info('打开小程序"小年糕+"')
+        self.driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
+        time.sleep(5)
+        self.get_videoList()
+        time.sleep(1)
+        self.driver.quit()
+
+    def search_elements(self, xpath):
+        time.sleep(1)
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = self.driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    def check_to_applet(self, xpath):
+        time.sleep(1)
+        webViews = self.driver.contexts
+        self.driver.switch_to.context(webViews[-1])
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                self.driver.find_element(By.XPATH, xpath)
+                Common.logger(self.log_type, self.crawler).info("切换到WebView成功\n")
+                return
+            except NoSuchElementException:
+                time.sleep(1)
+
+    def swipe_up(self):
+        self.search_elements('//*[@class="list-list--list"]')
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
+                          int(size["width"] * 0.5), int(size["height"] * 0.442), 200)
+        self.swipe_count += 1
+
+    def get_video_url(self, video_title_element):
+        for i in range(3):
+            self.search_elements('//*[@class="list-list--list"]')
+            Common.logger(self.log_type, self.crawler).info(f"video_title_element:{video_title_element[0]}")
+            time.sleep(1)
+            Common.logger(self.log_type, self.crawler).info("滑动标题至可见状态")
+            self.driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
+                                       video_title_element[0])
+            time.sleep(3)
+            Common.logger(self.log_type, self.crawler).info("点击标题")
+            video_title_element[0].click()
+            self.check_to_applet(xpath=r'//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
+            Common.logger(self.log_type, self.crawler).info("点击标题完成")
+            time.sleep(10)
+            video_url_elements = self.search_elements(
+                '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    def parse_detail(self, index):
+        page_source = self.driver.page_source
+        soup = BeautifulSoup(page_source, 'html.parser')
+        video_list = soup.findAll(name="wx-view", attrs={"class": "expose--adapt-parent"})
+        return video_list[index]
+
+    def get_video_info_2(self, video_element):
+        Common.logger(self.log_type, self.crawler).info(f"本轮已抓取{self.download_cnt}条视频\n")
+        if self.download_cnt >= int(self.rule_dict.get("videos_cnt", {}).get("min", 10)):
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+        self.count += 1
+        Common.logger(self.log_type, self.crawler).info(f"第{self.count}条视频")
+        # Title
+        video_title = video_element.find("wx-view", class_="dynamic--title").text
+        # User nickname
+        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
+
+        video_title_element = self.search_elements(f'//*[contains(text(), "{video_title}")]')
+        if video_title_element is None:
+            Common.logger(self.log_type, self.crawler).warning(
+                f"未找到该视频标题的element:{video_title_element}")
+            return
+        Common.logger(self.log_type, self.crawler).info("点击标题,进入视频详情页")
+        self.get_video_url(video_title_element)
+
+        video_mid_elements = self.search_elements("//wx-view[@class='bar--navBar-content-capsule']")
+        mid = int(video_mid_elements[0].get_attribute("data-mid"))
+        repeat_video_id = self.repeat_video_id(mid)
+        data_list = []
+        if repeat_video_id != 0:
+            Common.logger(self.log_type, self.crawler).info("该用户已经存在")
+            status = 1
+            self.insert_user(mid, user_name, data_list, status)
+
+            self.driver.press_keycode(AndroidKey.BACK)
+            return
+
+        data_list = self.get_user_list(mid)
+        if len(data_list) == 0:
+            Common.logger(self.log_type, self.crawler).info(f"不满足抓取条件")
+            self.driver.press_keycode(AndroidKey.BACK)
+            return
+        else:
+            status = 1
+            self.insert_user(mid, user_name, data_list, status)
+            Common.logger(self.log_type, self.crawler).info(f"{mid}:{user_name}入库")
+            self.driver.press_keycode(AndroidKey.BACK)
+            time.sleep(2)
+
+    def insert_user(self, mid, user_name, data_list, status):
+        insert_sql = f"""insert into crawler_xng_userid (user_id, user_name, user_title_text, status) values ({mid}, "{user_name}", "{data_list}", {status})"""
+        MysqlHelper.update_values(self.log_type, self.crawler, insert_sql, self.env, action='')
+
+    def get_user_list(self, mid):
+        next_t = -1
+        url = "https://kapi-xng-app.xiaoniangao.cn/v1/album/user_public"
+        headers = {
+            'Host': 'kapi-xng-app.xiaoniangao.cn',
+            'content-type': 'application/json; charset=utf-8',
+            'accept': '*/*',
+            'authorization': 'hSNQ2s9pvPxvFn4LaQJxKQ6/7Is=',
+            'verb': 'POST',
+            'content-md5': 'c7b7f8663984e8800e3bcd9b44465083',
+            'x-b3-traceid': '2f9da41f960ae077',
+            'accept-language': 'zh-cn',
+            'date': 'Mon, 19 Jun 2023 06:41:17 GMT',
+            'x-token-id': '',
+            'x-signaturemethod': 'hmac-sha1',
+            'user-agent': 'xngapp/157 CFNetwork/1335.0.3.1 Darwin/21.6.0'
+        }
+        payload = {
+            "token": "",
+            "limit": 20,
+            "start_t": next_t,
+            "visited_mid": mid,
+            "share_width": 300,
+            "share_height": 240,
+        }
+        response = requests.request(
+            "POST",
+            url,
+            headers=headers,
+            data=json.dumps(payload),
+        )
+        data_list = []
+        if "data" not in response.text or response.status_code != 200:
+            return data_list
+        elif "list" not in response.json()["data"]:
+            return data_list
+        elif len(response.json()["data"]["list"]) == 0:
+            return data_list
+
+        video_objs = response.json()["data"]["list"]
+        for video_obj in video_objs:
+            video_title = clean_title(video_obj.get("title", ""))
+
+            # Publish time
+            publish_time_stamp = int(int(video_obj.get("t", 0)) / 1000)
+            publish_time_str = time.strftime(
+                "%Y-%m-%d", time.localtime(publish_time_stamp)
+            )
+            # Keep only videos published within the last 7 days
+            date_seven_days_ago_string = (date.today() + timedelta(days=-7)).strftime("%Y-%m-%d")
+            if publish_time_str < date_seven_days_ago_string:
+                return []
+            v_url = video_obj.get("v_url")
+            data_list.append(video_title + ":" + v_url)
+
+        return data_list
+
+    def repeat_video_id(self, mid):
+        sql = f"SELECT `uid`  FROM `crawler_user_v3` WHERE  `source` = 'xiaoniangao'  and `uid` = {mid}"
+        repeat_video_id = MysqlHelper.get_values(self.log_type, self.crawler, sql, self.env)
+        return len(repeat_video_id)
+
+    def get_video_info(self, video_element):
+        try:
+            self.get_video_info_2(video_element)
+        except Exception as e:
+            self.driver.press_keycode(AndroidKey.BACK)
+            Common.logger(self.log_type, self.crawler).error(f"抓取单条视频异常:{e}\n")
+
+    def get_videoList(self):
+        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
+        self.driver.implicitly_wait(20)
+        # Switch to the web_view context
+        self.check_to_applet(xpath='//*[@class="tab-bar--tab tab-bar--tab-selected"]')
+        print("切换到 webview 成功")
+        time.sleep(1)
+        if self.search_elements('//*[@class="list-list--list"]') is None:
+            Common.logger(self.log_type, self.crawler).info("窗口已销毁\n")
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+
+        print("开始获取视频信息")
+        for i in range(50):
+            print("下滑{}次".format(i))
+            element = self.parse_detail(i)
+            self.get_video_info(element)
+            self.swipe_up()
+            time.sleep(1)
+            if self.swipe_count > 100:
+                return
+
+        print("下滑完成")
+        Common.logger(self.log_type, self.crawler).info("已抓取完一组,休眠 5 秒\n")
+        time.sleep(5)
+
+
+def run():
+    rule_dict1 = {"period": {"min": 365, "max": 365},
+                  "duration": {"min": 30, "max": 1800},
+                  "favorite_cnt": {"min": 0, "max": 0},
+                  "videos_cnt": {"min": 5000, "max": 0},
+                  "share_cnt": {"min": 0, "max": 0}}
+    XiaoNianGaoPlusRecommend("recommend", "xiaoniangao", "prod", rule_dict1, 6267141)
+
+
+if __name__ == "__main__":
+    process = multiprocessing.Process(
+        target=run
+    )
+    process.start()
+    while True:
+        if not process.is_alive():
+            print("正在重启")
+            process.terminate()
+            time.sleep(60)
+            os.system("adb forward --remove-all")
+            process = multiprocessing.Process(target=run)
+            process.start()
+        time.sleep(60)
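
Note on this file: the __main__ block implements a simple watchdog: the parent polls is_alive() every 60 seconds and, when the crawler process dies, clears adb port forwards and respawns it. A minimal standalone sketch of the same pattern, with `target` standing in for run():

# Minimal watchdog sketch; `target` is any crash-prone entry point.
import multiprocessing
import os
import time

def supervise(target, interval=60):
    process = multiprocessing.Process(target=target)
    process.start()
    while True:
        if not process.is_alive():
            os.system("adb forward --remove-all")  # drop stale adb port forwards
            process = multiprocessing.Process(target=target)
            process.start()
        time.sleep(interval)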