
add 小年糕rule

zhangyong 1 year ago
parent
commit
11d3222164

+ 22 - 2
main/process_offline.sh

@@ -74,7 +74,7 @@ if [[ "$time" > "00:00:00"  &&  "$time" < "02:59:59" || "$time" > "12:00:00"  &&
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 吉祥幸福 爬虫脚本任务" >> ${log_path}
 #  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
 #  ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zmyx | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_xngrule | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_jixiangxingfu_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 吉祥幸福爬虫, 异常停止, 正在重启!" >> ${log_path}
@@ -112,7 +112,7 @@ else
 fi
 
 # 众妙音信-new
-if [[ "$time" > "05:00:00"  &&  "$time" < "06:59:59" || "$time" > "20:00:00"  &&  "$time" < "21:59:59" ]];then
+if [[ "$time" > "05:00:00"  &&  "$time" < "06:59:59" || "$time" > "17:00:00"  &&  "$time" < "18:59:59" ]];then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 众妙音信-new 爬虫脚本任务" >> ${log_path}
 #  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
 #  ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -132,6 +132,26 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 众妙音信 爬虫脚本任务结束" >> ${log_path}
 fi
 
+# 小年糕-rule
+if [[ "$time" > "07:00:00"  &&  "$time" < "08:59:59" || "$time" > "19:00:00"  &&  "$time" < "21:59:59" ]];then
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 小年糕-rule 爬虫脚本任务" >> ${log_path}
+#  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
+#  ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_zmyx | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps -ef | grep "run_xngrule_recommend.py" | grep -v "grep"
+  if [ "$?" -eq 1 ];then
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 小年糕-rule mini-program crawler stopped unexpectedly, restarting!" >> ${log_path}
+    adb forward --remove-all
+    cd ${piaoquan_crawler_dir}
+    nohup python3 -u xiaoniangaoplus/xiaoniangaoplus_main/run_xngrule_recommend.py --log_type="recommend" --crawler="xiaoniangaorule" --env=${env} >>xiaoniangaoplus/logs/nohup-recommend.log 2>&1 &
+    echo "$(date "+%Y-%m-%d %H:%M:%S") Restart complete!" >> ${log_path}
+  else
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 小年糕-rule mini-program crawler process is healthy" >> ${log_path}
+  fi
+
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 众妙音信 爬虫脚本任务结束" >> ${log_path}
+fi
 
 # Delete old logs
 echo "$(date "+%Y-%m-%d %H:%M:%S") Start cleaning log files older than 10 days" >> ${log_path}

+ 375 - 0
xiaoniangaoplus/xiaoniangaoplus/xiaoniangao_plus_rule.py

@@ -0,0 +1,375 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/10/30
+import json
+import os
+import sys
+import time
+from hashlib import md5
+
+import requests
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from bs4 import BeautifulSoup
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.common.by import By
+import multiprocessing
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.mq import MQ
+from common.public import download_rule, get_config_from_mysql
+from common.scheduling_db import MysqlHelper
+
+
+def get_redirect_url(url):
+    # Unwrap a single redirect hop, if any
+    res = requests.get(url, allow_redirects=False)
+    if res.status_code in (301, 302):
+        return res.headers['Location']
+    return url
+
+class XiaoNianGaoPlusRecommend:
+    env = None
+    driver = None
+    log_type = None
+
+    def __init__(self, log_type, crawler, env, rule_dict, our_uid):
+        self.mq = None
+        self.platform = "小年糕-rule"
+        self.download_cnt = 0
+        self.element_list = []
+        self.count = 0
+        self.swipe_count = 0
+        self.log_type = log_type
+        self.crawler = crawler
+        self.env = env
+        self.rule_dict = rule_dict
+        self.our_uid = our_uid
+        # dev and prod currently point at the same chromedriver build
+        chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
+
+        Common.logger(self.log_type, self.crawler).info("Launching WeChat")
+        Common.logging(self.log_type, self.crawler, self.env, 'Launching WeChat')
+        # Appium capabilities for the WeChat session
+        caps = {
+            "platformName": "Android",
+            "devicesName": "Android",
+            # "platformVersion": "13",
+            # "udid": "emulator-5554",
+            "appPackage": "com.tencent.mm",
+            "appActivity": ".ui.LauncherUI",
+            "autoGrantPermissions": "true",
+            "noReset": True,
+            "resetkeyboard": True,
+            "unicodekeyboard": True,
+            "showChromedriverLog": True,
+            "printPageSourceOnFailure": True,
+            "recreateChromeDriverSessions": True,
+            "enableWebviewDetailsCollection": True,
+            "setWebContentsDebuggingEnabled": True,
+            "newCommandTimeout": 6000,
+            "automationName": "UiAutomator2",
+            "chromedriverExecutable": chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+        }
+        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        self.driver.implicitly_wait(30)
+
+        for i in range(120):
+            try:
+                if self.driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
+                    Common.logger(self.log_type, self.crawler).info("WeChat started successfully")
+                    Common.logging(self.log_type, self.crawler, self.env, 'WeChat started successfully')
+                    break
+                elif self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
+                    Common.logger(self.log_type, self.crawler).info("Found and dismissing the system pull-down shade")
+                    Common.logging(self.log_type, self.crawler, self.env, 'Found and dismissing the system pull-down shade')
+                    self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
+                else:
+                    pass
+            except NoSuchElementException:
+                time.sleep(1)
+
+        Common.logger(self.log_type, self.crawler).info("Swipe down to reveal the mini-program panel")
+        # Common.logging(self.log_type, self.crawler, self.env, 'Swipe down to reveal the mini-program panel')
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                          int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        time.sleep(1)
+        Common.logger(self.log_type, self.crawler).info('Opening the mini-program "小年糕+"')
+        # Common.logging(self.log_type, self.crawler, self.env, 'Opening the mini-program "小年糕+"')
+        self.driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
+        time.sleep(5)
+        self.get_videoList()
+        time.sleep(1)
+        self.driver.quit()
+
+    def search_elements(self, xpath):
+        time.sleep(1)
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = self.driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    def check_to_applet(self, xpath):
+        time.sleep(1)
+        webViews = self.driver.contexts
+        self.driver.switch_to.context(webViews[-1])
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                self.driver.find_element(By.XPATH, xpath)
+                Common.logger(self.log_type, self.crawler).info("Switched to the WebView\n")
+                Common.logging(self.log_type, self.crawler, self.env, 'Switched to the WebView\n')
+                return
+            except NoSuchElementException:
+                time.sleep(1)
+
+    def repeat_video(self, video_id):
+        sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福", "小年糕") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(self.log_type, self.crawler, sql, self.env)
+        return len(repeat_video)
+
+    def swipe_up(self):
+        self.search_elements('//*[@class="list-list--list"]')
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
+                          int(size["width"] * 0.5), int(size["height"] * 0.442), 200)
+        self.swipe_count += 1
+
+    def get_video_url(self, video_title_element):
+        for i in range(3):
+            self.search_elements('//*[@class="list-list--list"]')
+            Common.logger(self.log_type, self.crawler).info(f"video_title_element:{video_title_element[0]}")
+            time.sleep(1)
+            Common.logger(self.log_type, self.crawler).info("Scrolling the title into view")
+            self.driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
+                                       video_title_element[0])
+            time.sleep(3)
+            Common.logger(self.log_type, self.crawler).info("Clicking the title")
+            video_title_element[0].click()
+            self.check_to_applet(xpath=r'//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
+            Common.logger(self.log_type, self.crawler).info("Title clicked")
+            time.sleep(10)
+            video_url_elements = self.search_elements(
+                '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    def parse_detail(self, index):
+        page_source = self.driver.page_source
+        soup = BeautifulSoup(page_source, 'html.parser')
+        video_list = soup.findAll(name="wx-view", attrs={"class": "expose--adapt-parent"})
+        # return the feed card at the requested position
+        return video_list[index]
+
+    def get_video_info_2(self, video_element):
+        Common.logger(self.log_type, self.crawler).info(f"Downloaded {self.download_cnt} videos this round\n")
+        # Common.logging(self.log_type, self.crawler, self.env, f"Downloaded {self.download_cnt} videos this round\n")
+        if self.download_cnt >= int(self.rule_dict.get("videos_cnt", {}).get("min", 10)):
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+        self.count += 1
+        Common.logger(self.log_type, self.crawler).info(f"Video #{self.count}")
+        # Common.logging(self.log_type, self.crawler, self.env, f"Video #{self.count}")
+        # Title
+        video_title = video_element.find("wx-view", class_="dynamic--title").text
+        # Play-count string
+        play_str = video_element.find("wx-view", class_="dynamic--views").text
+        info_list = video_element.findAll("wx-view", class_="dynamic--commerce-btn-text")
+        # Like count
+        like_str = info_list[1].text
+        # Comment count
+        comment_str = info_list[2].text
+        # Video duration
+        duration_str = video_element.find("wx-view", class_="dynamic--duration").text
+        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
+        # Avatar URL
+        avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
+        # Cover URL
+        cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
+        play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
+        duration = int(duration_str.split(":")[0].strip()) * 60 + int(duration_str.split(":")[-1].strip())
+        if "点赞" in like_str:
+            like_cnt = 0
+        elif "万" in like_str:
+            like_cnt = int(like_str.split("万")[0]) * 10000
+        else:
+            like_cnt = int(like_str)
+        if "评论" in comment_str:
+            comment_cnt = 0
+        elif "万" in comment_str:
+            comment_cnt = int(comment_str.split("万")[0]) * 10000
+        else:
+            comment_cnt = int(comment_str)
+        out_video_id = md5(video_title.encode('utf8')).hexdigest()
+        out_video_id = out_video_id+"rule"
+        out_user_id = md5(user_name.encode('utf8')).hexdigest()
+        out_user_id = out_user_id+"rule"
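+        # the "rule" suffix makes these md5-based IDs distinct from the non-rule
+        # 小年糕 crawler's IDs, so repeat_video() will not match its records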
+        video_dict = {
+            "video_title": video_title,
+            "video_id": out_video_id,
+            "duration_str": duration_str,
+            "duration": duration,
+            "play_str": play_str,
+            "play_cnt": play_cnt,
+            "like_str": like_str,
+            "like_cnt": like_cnt,
+            "comment_cnt": comment_cnt,
+            "share_cnt": 0,
+            "user_name": user_name,
+            "user_id": out_user_id,
+            'publish_time_stamp': int(time.time()),
+            'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+            "avatar_url": avatar_url,
+            "cover_url": cover_url,
+            "session": f"xiaoniangao-{int(time.time())}"
+        }
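+        # e.g. 170 likes / 10,000 plays gives a ratio of exactly 0.017; a video with
+        # fewer than 100k plays at or below this ratio is skipped below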
+        # guard against zero plays before computing the like/play ratio
+        video_percent = '%.3f' % (like_cnt / play_cnt) if play_cnt else "0.000"
+        if play_cnt < 100000:
+            if float(video_percent) <= 0.017:
+                Common.logger(self.log_type, self.crawler).info(f"Below threshold: like/play ratio {video_percent}, plays {play_cnt}\n")
+                return
+        for k, v in video_dict.items():
+            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
+        Common.logging(self.log_type, self.crawler, self.env, f"video_dict:{video_dict}")
+
+        if video_title is None or cover_url is None:
+            Common.logger(self.log_type, self.crawler).info("Invalid video\n")
+            Common.logging(self.log_type, self.crawler, self.env, 'Invalid video\n')
+            # self.swipe_up()
+            time.sleep(0.5)
+        elif download_rule(log_type=self.log_type,
+                           crawler=self.crawler,
+                           video_dict=video_dict,
+                           rule_dict=self.rule_dict) is False:
+            Common.logger(self.log_type, self.crawler).info("Does not meet the download rule\n")
+            Common.logging(self.log_type, self.crawler, self.env, "Does not meet the download rule\n")
+            # self.swipe_up()
+            time.sleep(0.5)
+        elif any(str(word) in video_dict["video_title"]
+                 for word in get_config_from_mysql(log_type=self.log_type,
+                                                   source=self.crawler,
+                                                   env=self.env,
+                                                   text="filter",
+                                                   action="")):
+            Common.logger(self.log_type, self.crawler).info('Title hit a filter word\n')
+            Common.logging(self.log_type, self.crawler, self.env, 'Title hit a filter word\n')
+            time.sleep(0.5)
+        elif self.repeat_video(out_video_id) != 0:
+            Common.logger(self.log_type, self.crawler).info('Video already downloaded\n')
+            Common.logging(self.log_type, self.crawler, self.env, 'Video already downloaded\n')
+            time.sleep(5)
+        else:
+            video_title_element = self.search_elements(f'//*[contains(text(), "{video_title}")]')
+            if video_title_element is None:
+                Common.logger(self.log_type, self.crawler).warning(
+                    f"Element for this video title not found: {video_title_element}")
+                Common.logging(self.log_type, self.crawler, self.env,
+                               f"Element for this video title not found: {video_title_element}")
+                return
+            Common.logger(self.log_type, self.crawler).info("Clicking the title to open the video detail page")
+            Common.logging(self.log_type, self.crawler, self.env, "Clicking the title to open the video detail page")
+            video_url = self.get_video_url(video_title_element)
+            if video_url is None:
+                Common.logger(self.log_type, self.crawler).info("Failed to get the video playback URL\n")
+                self.driver.press_keycode(AndroidKey.BACK)
+                time.sleep(5)
+                return
+            # resolve a possible redirect only once we actually have a URL
+            video_url = get_redirect_url(video_url)
+
+            video_mid_elements = self.search_elements("//wx-view[@class='bar--navBar-content-capsule']")
+            mid = 0
+            if video_mid_elements:
+                mid = int(video_mid_elements[0].get_attribute("data-mid"))
+            video_dict['profile_id'] = mid
+            video_dict['video_url'] = video_url
+            Common.logger(self.log_type, self.crawler).info(f"video_url:{video_url}")
+
+            video_dict["platform"] = self.crawler
+            video_dict["strategy"] = self.log_type
+            video_dict["out_video_id"] = video_dict["video_id"]
+            video_dict["crawler_rule"] = json.dumps(self.rule_dict)
+            video_dict["user_id"] = self.our_uid
+            video_dict["publish_time"] = video_dict["publish_time_str"]
+            self.mq.send_msg(video_dict)
+            # print(video_dict)
+            self.download_cnt += 1
+            self.driver.press_keycode(AndroidKey.BACK)
+            time.sleep(5)
+
+    def get_video_info(self, video_element):
+        try:
+            self.get_video_info_2(video_element)
+        except Exception as e:
+            Common.logger(self.log_type, self.crawler).error(f"Error while scraping a single video: {e}\n")
+
+    def get_videoList(self):
+        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
+        self.driver.implicitly_wait(20)
+        # switch to the web_view
+        self.check_to_applet(xpath='//*[@class="tab-bar--tab tab-bar--tab-selected"]')
+        print("Switched to webview")
+        time.sleep(1)
+        page = 0
+        if self.search_elements('//*[@class="list-list--list"]') is None:
+            Common.logger(self.log_type, self.crawler).info("Window has been destroyed\n")
+            Common.logging(self.log_type, self.crawler, self.env, 'Window has been destroyed\n')
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+
+        print("开始获取视频信息")
+        for i in range(50):
+            print("下滑{}次".format(i))
+            element = self.parse_detail(i)
+            self.get_video_info(element)
+            self.swipe_up()
+            time.sleep(1)
+            if self.swipe_count > 100:
+                return
+
+        print("下滑完成")
+        # time.sleep(100)
+        Common.logger(self.log_type, self.crawler).info("Finished one batch, sleeping 5 seconds\n")
+        Common.logging(self.log_type, self.crawler, self.env, "Finished one batch, sleeping 5 seconds\n")
+        time.sleep(5)
+
+
+def run():
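+    # hard-coded local test rules; max=0 appears to mean "no upper bound" here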
+    rule_dict1 = {"period": {"min": 0, "max": 365},
+                  "duration": {"min": 0, "max": 1800},
+                  "favorite_cnt": {"min": 0, "max": 0},
+                  "videos_cnt": {"min": 1, "max": 0},
+                  "share_cnt": {"min": 0, "max": 0}}
+    XiaoNianGaoPlusRecommend("recommend", "xiaoniangao", "dev", rule_dict1, 6267141)
+
+
+if __name__ == "__main__":
+    process = multiprocessing.Process(
+        target=run
+    )
+    process.start()
+    while True:
+        if not process.is_alive():
+            print("正在重启")
+            process.terminate()
+            time.sleep(60)
+            os.system("adb forward --remove-all")
+            process = multiprocessing.Process(target=run)
+            process.start()
+        time.sleep(60)
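
Note: get_video_info_2 above handles three counter formats — placeholder text such as "点赞"/"评论" meaning zero, "万"-scaled values such as "1.2万", and plain digits. For reference, the same rule as a standalone sketch (parse_cnt is our name for illustration, not part of this commit):

    def parse_cnt(s: str) -> int:
        # placeholder text means the counter is zero
        if "点赞" in s or "评论" in s:
            return 0
        # "1.2万" scales by 10,000
        if "万" in s:
            return int(float(s.split("万")[0]) * 10000)
        return int(s)

    assert parse_cnt("点赞") == 0
    assert parse_cnt("1.2万") == 12000
    assert parse_cnt("356") == 356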

+ 159 - 0
xiaoniangaoplus/xiaoniangaoplus_main/run_xngrule_recommend.py

@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# @Author: luojunhui
+# @Time: 2023/9/27
+import argparse
+import multiprocessing
+import os
+import random
+import sys
+import time
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+
+sys.path.append(os.getcwd())
+from common.public import get_consumer, ack_message, task_fun_mq, get_rule_from_mysql
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from xiaoniangaoplus.xiaoniangaoplus.xiaoniangao_plus_rule import XiaoNianGaoPlusRecommend
+
+
+
+def run(args1, args2, args3, args4, args5):
+    XiaoNianGaoPlusRecommend(
+        log_type=args1,
+        crawler=args2,
+        env=args3,
+        rule_dict=args4,
+        our_uid=args5
+    )
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # Long polling: if the topic has no messages, the request is held server-side
+    # until a message arrives or wait_seconds elapses (30s here; 30s is the maximum).
+    wait_seconds = 30
+    # Consume at most `batch` messages per request (1 here; 16 is the maximum).
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
+    while True:
+        try:
+            # Consume messages via long polling.
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                xng_play_start_time = int(time.time())
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # crawler business logic: parse the task payload once
+                task = task_fun_mq(msg.message_body)
+                task_dict = task['task_dict']
+                rule_dict = task['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = []
+                for user in user_list:
+                    our_uid_list.append(user["uid"])
+                our_uid = random.choice(our_uid_list)
+                Common.logger(log_type, crawler).info(f"Scheduled task: {task_dict}")
+                Common.logging(log_type, crawler, env, f"Scheduled task: {task_dict}")
+                # Common.logger(log_type, crawler).info(f"Crawl rule: {rule_dict}")
+                # Common.logging(log_type, crawler, env, f"Crawl rule: {rule_dict}")
+                Common.logger(log_type, crawler).info(f"User list: {user_list}\n")
+                Common.logger(log_type, crawler).info(f'Start crawling: {task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'Start crawling: {task_dict["taskName"]}\n')
+                new_r = get_rule_from_mysql(task_id=task_id, log_type=log_type, crawler=crawler, env=env)
+                # Common.logger(log_type, crawler).info(f'rule_dict:{new_r}\n')
+                r_d = {}
+                for item in new_r:
+                    for k, val in item.items():
+                        r_d[k] = val
+                Common.logger(log_type, crawler).info(f"Crawl rule: {r_d}")
+                Common.logging(log_type, crawler, env, f"Crawl rule: {r_d}")
+                process = multiprocessing.Process(
+                    target=run,
+                    args=(log_type, crawler, env, r_d, our_uid)
+                )
+                process.start()
+                print("进程开始")
+
+                while True:
+                    if not process.is_alive():
+                        print("正在重启")
+                        process.terminate()
+                        os.system("adb forward --remove-all")
+                        time.sleep(60)
+                        new_r = get_rule_from_mysql(task_id=task_id, log_type=log_type, crawler=crawler, env=env)
+                        r_d = {}
+                        for item in new_r:
+                            for k, val in item.items():
+                                r_d[k] = val
+                        Common.logger(log_type, crawler).info(f'Crawl rule: {r_d}')
+                        Common.logging(log_type, crawler, env, f"Crawl rule: {r_d}")
+                        process = multiprocessing.Process(target=run, args=(log_type, crawler, env, r_d, our_uid))
+                        process.start()
+                    time.sleep(60)
+                # XiaoNianGaoPlusRecommend.start_wechat(log_type=log_type,
+                #                                       crawler=crawler,
+                #                                       rule_dict=rule_dict,
+                #                                       our_uid=our_uid,
+                #                                       env=env)
+                # Common.del_logs(log_type, crawler)
+                # Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                # Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                # xng_play_end_time = int(time.time())
+                # xng_play_duration = xng_play_start_time - xng_play_end_time
+                # Common.logger(log_type, crawler).info(f"duration {xng_play_duration}")
+                # Common.logging(log_type, crawler, env, f"duration {xng_play_duration}")
+        except MQExceptionBase as err:
+            # No message available to consume in the topic.
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                continue
+
+            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # build the CLI argument parser
+    parser.add_argument('--log_type', type=str)  # declare an argument and its type
+    parser.add_argument('--crawler')
+    parser.add_argument('--topic_name')
+    parser.add_argument('--group_id')
+    parser.add_argument('--env')
+    args = parser.parse_args()  # parse values passed on the command line
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)
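
For reference, a minimal manual invocation of this runner (topic_name and group_id values are placeholders, not taken from this commit; note that process_offline.sh above launches it with only --log_type, --crawler and --env):

    python3 xiaoniangaoplus/xiaoniangaoplus_main/run_xngrule_recommend.py \
        --log_type="recommend" --crawler="xiaoniangaorule" \
        --topic_name="<topic_name>" --group_id="<group_id>" --env="dev"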