add 小年糕user

zhangyong · 1 year ago · commit bb899be637

+ 6 - 0
main/process_mq.sh

@@ -69,6 +69,12 @@ elif [ ${crawler} = "xngrule" ] && [ ${log_type} = "recommend" ];then
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
 
+elif [ ${crawler} = "xnguser" ] && [ ${log_type} = "recommend" ];then
+  piaoquan_crawler_dir=/Users/tzld/Desktop/piaoquan_crawler/
+  profile_path=/.base_profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
+
 else
   piaoquan_crawler_dir=/root/piaoquan_crawler/
   profile_path=/etc/profile

+ 389 - 0
xiaoniangaoplus/xiaoniangaoplus/xiaoniangao_plus_user.py

@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/10/31
+import json
+import os
+import sys
+import time
+from datetime import date, timedelta
+from hashlib import md5
+
+import requests
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from bs4 import BeautifulSoup
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.mq import MQ
+from common.public import download_rule, get_config_from_mysql
+from common.scheduling_db import MysqlHelper
+
+
+
+def get_redirect_url(url):
+    res = requests.get(url, allow_redirects=False)
+    if res.status_code in (301, 302):
+        return res.headers['Location']
+    else:
+        return url
+
+
+class XiaoNianGaoPlusRecommend:
+    platform = "小年糕-user"
+    download_cnt = 0
+    element_list = []
+    i = 0
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
+        if env == "dev":
+            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
+        else:
+            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
+
+        Common.logger(log_type, crawler).info("启动微信")
+        Common.logging(log_type, crawler, env, '启动微信')
+        caps = {
+            "platformName": "Android",
+            "devicesName": "Android",
+            # "platformVersion": "11",
+            # "udid": "emulator-5554",
+            "appPackage": "com.tencent.mm",
+            "appActivity": ".ui.LauncherUI",
+            "autoGrantPermissions": "true",
+            "noReset": True,
+            "resetkeyboard": True,
+            "unicodekeyboard": True,
+            "showChromedriverLog": True,
+            "printPageSourceOnFailure": True,
+            "recreateChromeDriverSessions": True,
+            "enableWebviewDetailsCollection": True,
+            "setWebContentsDebuggingEnabled": True,
+            "newCommandTimeout": 6000,
+            "automationName": "UiAutomator2",
+            "chromedriverExecutable": chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(30)
+
+        for i in range(120):
+            try:
+                if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
+                    Common.logger(log_type, crawler).info("微信启动成功")
+                    Common.logging(log_type, crawler, env, '微信启动成功')
+                    break
+                elif driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
+                    Common.logger(log_type, crawler).info("发现并关闭系统下拉菜单")
+                    Common.logging(log_type, crawler, env, '发现并关闭系统下拉菜单')
+                    driver.find_element(By.ID, "com.android.system:id/dismiss_view").click()
+                else:
+                    pass
+            except NoSuchElementException:
+                time.sleep(1)
+
+        Common.logger(log_type, crawler).info("下滑,展示小程序选择面板")
+        Common.logging(log_type, crawler, env, '下滑,展示小程序选择面板')
+        size = driver.get_window_size()
+        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                     int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        time.sleep(1)
+        Common.logger(log_type, crawler).info('打开小程序"小年糕+"')
+        Common.logging(log_type, crawler, env, '打开小程序"小年糕+"')
+        driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
+        time.sleep(5)
+
+        cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
+
+        time.sleep(1)
+        driver.quit()
+
+    @classmethod
+    def search_elements(cls, driver: WebDriver, xpath):
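+        # Try the xpath in every window handle and return the first non-empty
+        # match; falls through (returns None implicitly) if nothing matches.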
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    @classmethod
+    def check_to_applet(cls, log_type, crawler, env, driver: WebDriver):
+        time.sleep(1)
+        webViews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webViews:{webViews}")
+        Common.logging(log_type, crawler, env, f"webViews:{webViews}")
+        driver.switch_to.context(webViews[1])
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                driver.find_element(By.XPATH, '//*[@class="tab-bar--tab tab-bar--tab-selected"]')
+                Common.logger(log_type, crawler).info("切换到小程序成功\n")
+                Common.logging(log_type, crawler, env, '切换到小程序成功\n')
+                return
+            except NoSuchElementException:
+                time.sleep(1)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
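+        # Dedupe check: counts rows in crawler_video that already carry this
+        # out_video_id for either platform name.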
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def swipe_up(cls, driver: WebDriver):
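+        # Wait for the player element so the page is rendered, then swipe upward
+        # by roughly 40% of the screen height to advance the feed.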
+        cls.search_elements(driver, '//*[@class="videoplay"]')
+        size = driver.get_window_size()
+        driver.swipe(int(size["width"] * 0.7), int(size["height"] * 0.8),
+                     int(size["width"] * 0.7), int(size["height"] * 0.4), 200)
+
+    @classmethod
+    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_title_element):
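+        # Up to three attempts: scroll the title into view, tap it to open the
+        # detail page, then read the <wx-video> src; returns None if all fail.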
+        for i in range(3):
+            cls.search_elements(driver, '//wx-view[@class="expose--adapt-parent"]')
+            Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
+            time.sleep(1)
+            Common.logger(log_type, crawler).info("滑动标题至可见状态")
+            driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
+                                  video_title_element[0])
+            time.sleep(3)
+            Common.logger(log_type, crawler).info("点击标题")
+            video_title_element[0].click()
+            # driver.execute_script("arguments[0].click();", video_title_element[0])
+            Common.logger(log_type, crawler).info("点击标题完成")
+            time.sleep(1)
+            video_url_elements = cls.search_elements(driver,
+                                                     '//wx-video[@class="video-player--video"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    @classmethod
+    def get_user(cls, video_element, driver, log_type, crawler, env):
+        # raw play-count string, e.g. "3000+次播放"
+        play_str = video_element.find("wx-view", class_="dynamic--views").text
+        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
+        play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
+        if play_cnt < 30000:
+            Common.logger(log_type, crawler).warning(f"播放量:{play_cnt}")
+            Common.logging(log_type, crawler, env, f"播放量:{play_cnt}")
+            return
+        video_name_element = cls.search_elements(driver, f'//*[contains(text(), "{user_name}")]')
+        if video_name_element is None:
+            Common.logger(log_type, crawler).warning(f"未找到该用户id的element:{video_name_element}")
+            Common.logging(log_type, crawler, env, f"未找到该用户id的element:{video_name_element}")
+            return
+        Common.logger(log_type, crawler).info("点击用户名,进入用户主页")
+        Common.logging(log_type, crawler, env, "点击用户名,进入用户主页")
+        cls.search_elements(driver, '//*[@class="list-list--list"]')
+        Common.logger(log_type, crawler).info(f"video_title_element:{video_name_element[0]}")
+        time.sleep(1)
+        Common.logger(log_type, crawler).info("滑动用户名至可见状态")
+        driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
+                              video_name_element[0])
+        time.sleep(3)
+        Common.logger(log_type, crawler).info("用户名")
+        video_name_element[0].click()
+        Common.logger(log_type, crawler).info("点击用户名")
+        time.sleep(30)
+        cls.search_elements(driver, '//wx-view[@class="expose--adapt-parent"]')
+        page_source = driver.page_source
+        soup = BeautifulSoup(page_source, 'html.parser')
+        soup.prettify()
+
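+        # Track already-seen cards in the class-level element_list so the same
+        # wx-view is not processed twice across repeated page_source snapshots.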
+        video_url_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
+        video_url_elements = list(set(video_url_elements).difference(set(cls.element_list)))
+        cls.element_list = list(set(video_url_elements) | set(cls.element_list))
+        if video_url_elements:
+            return video_url_elements,user_name
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, driver: WebDriver, env, rule_dict, our_uid):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        driver.implicitly_wait(20)
+        cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver)
+        time.sleep(1)
+
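+        # Main loop: snapshot the rendered feed, diff it against already-seen
+        # cards, open each new card's author page, and crawl that user's albums.
+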
+        page = 0
+        while True:
+            if cls.search_elements(driver, '//*[@class="list-list--list"]') is None:
+                Common.logger(log_type, crawler).info("窗口已销毁\n")
+                Common.logging(log_type, crawler, env, '窗口已销毁\n')
+                cls.i = 0
+                cls.download_cnt = 0
+                cls.element_list = []
+                return
+
+            cls.swipe_up(driver)
+
+            page_source = driver.page_source
+            soup = BeautifulSoup(page_source, 'html.parser')
+            soup.prettify()
+
+            video_list_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
+            video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
+            cls.element_list = list(set(video_list_elements) | set(cls.element_list))
+            Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
+            Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
+
+            if len(video_list_elements) == 0:
+                for i in range(10):
+                    Common.logger(log_type, crawler).info(f"向上滑动第{i + 1}次")
+                    cls.swipe_up(driver)
+                    time.sleep(0.5)
+                continue
+
+            for i, video_element in enumerate(video_list_elements):
+                try:
+                    result = cls.get_user(video_element, driver, log_type, crawler, env)
+                    if result is None:
+                        continue
+                    video_url_elements, user_name = result
+                    for user_video_element in video_url_elements:
+                        try:
+
+                            Common.logger(log_type, crawler).info(f"本轮已抓取{cls.download_cnt}条视频\n")
+                            Common.logging(log_type, crawler, env, f"本轮已抓取{cls.download_cnt}条视频\n")
+                            if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
+                                cls.i = 0
+                                cls.download_cnt = 0
+                                cls.element_list = []
+                                return
+                            cls.i += 1
+                            Common.logger(log_type, crawler).info(f"第{cls.i}条视频")
+                            Common.logging(log_type, crawler, env, f"第{cls.i}条视频")
+
+                            video_title = user_video_element.find("wx-view", class_="album--album-cover-title").text
+                            # raw play-count string
+                            play_str = user_video_element.find("wx-view", class_="album--album-cover-views").text
+
+                            # cover URL
+                            cover_url = user_video_element.find("wx-image", class_="album--album-cover-bg")["src"]
+                            play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
+                            # publish date string, e.g. "2023.10.28"
+                            play_time = user_video_element.find("wx-view", class_="album--album-time").text
+                            date_three_days_ago_string = (date.today() + timedelta(days=-3)).strftime("%Y.%m.%d")
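+                            # String comparison is safe here: both sides are
+                            # zero-padded "YYYY.MM.DD", so lexicographic order
+                            # matches chronological order.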
+                            is_recent = play_time > date_three_days_ago_string
+                            if not is_recent:
+                                Common.logger(log_type, crawler).info(f"发布时间超过3天,发布时间:{play_time}\n")
+                                continue
+
+                            out_video_id = md5(video_title.encode('utf8')).hexdigest()
+                            out_video_id = out_video_id + "user"
+                            out_user_id = md5(user_name.encode('utf8')).hexdigest()
+                            out_user_id = out_user_id + "user"
+                            video_dict = {
+                                "video_title": video_title,
+                                "video_id": out_video_id,
+                                "duration_str": '',
+                                "duration": 0,
+                                "play_str": play_str,
+                                "play_cnt": play_cnt,
+                                "like_str": "",
+                                "like_cnt": 0,
+                                "comment_cnt": 0,
+                                "share_cnt": 0,
+                                "user_name": user_name,
+                                "user_id": out_user_id,
+                                'publish_time_stamp': int(time.time()),
+                                'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S",
+                                                                  time.localtime(int(time.time()))),
+                                "avatar_url": cover_url,
+                                "cover_url": cover_url,
+                                "session": f"xiaoniangao-{int(time.time())}"
+                            }
+
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+                            # Common.logger(log_type, crawler).info(f"==========分割线==========\n")
+
+                            if video_title is None or cover_url is None:
+                                Common.logger(log_type, crawler).info("无效视频\n")
+                                Common.logging(log_type, crawler, env, '无效视频\n')
+                                cls.swipe_up(driver)
+                                time.sleep(0.5)
+                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
+                                               rule_dict=rule_dict) is False:
+                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                                cls.swipe_up(driver)
+                                time.sleep(0.5)
+                            elif any(str(word) in video_dict["video_title"]
+                                     for word in get_config_from_mysql(log_type=log_type,
+                                                                       source=crawler,
+                                                                       env=env,
+                                                                       text="filter",
+                                                                       action="")):
+                                Common.logger(log_type, crawler).info('已中过滤词\n')
+                                Common.logging(log_type, crawler, env, '已中过滤词\n')
+                                cls.swipe_up(driver)
+                                time.sleep(0.5)
+                            elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                                Common.logging(log_type, crawler, env, '视频已下载\n')
+                                cls.swipe_up(driver)
+                                time.sleep(5)
+                            else:
+                                video_title_element = cls.search_elements(driver,
+                                                                          f'//*[contains(text(), "{video_title}")]')
+                                if video_title_element is None:
+                                    Common.logger(log_type, crawler).warning(f"未找到标题 {video_title} 对应的element")
+                                    Common.logging(log_type, crawler, env, f"未找到标题 {video_title} 对应的element")
+                                    continue
+                                Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
+                                Common.logging(log_type, crawler, env, "点击标题,进入视频详情页")
+                                video_url = cls.get_video_url(log_type, crawler, driver, video_title_element)
+                                if video_url is None:
+                                    Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
+                                    driver.press_keycode(AndroidKey.BACK)
+                                    time.sleep(5)
+                                    continue
+                                video_url = get_redirect_url(video_url)
+
+                                video_dict['video_url'] = video_url
+                                Common.logger(log_type, crawler).info(f"video_url:{video_url}")
+
+                                video_dict["platform"] = crawler
+                                video_dict["strategy"] = log_type
+                                video_dict["out_video_id"] = video_dict["video_id"]
+                                video_dict["crawler_rule"] = json.dumps(rule_dict)
+                                video_dict["user_id"] = our_uid
+                                video_dict["publish_time"] = video_dict["publish_time_str"]
+                                mq.send_msg(video_dict)
+                                cls.download_cnt += 1
+                                driver.press_keycode(AndroidKey.BACK)
+                                time.sleep(5)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+                    driver.press_keycode(AndroidKey.BACK)
+                    time.sleep(5)
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+            Common.logger(log_type, crawler).info("已抓取完一组,休眠 5 秒\n")
+            Common.logging(log_type, crawler, env, "已抓取完一组,休眠 5 秒\n")
+            time.sleep(5)
+            page += 1
+
+
+if __name__ == "__main__":
+    rule_dict1 = {"period": {"min": 0, "max": 365},
+                  "duration": {"min": 0, "max": 1800},
+                  "favorite_cnt": {"min": 0, "max": 0},
+                  "videos_cnt": {"min": 10, "max": 20},
+                  "share_cnt": {"min": 0, "max": 0}}
+    XiaoNianGaoPlusRecommend.start_wechat("recommend", "xiaoniangao", "dev", rule_dict1, 6267141)

+ 143 - 0
xiaoniangaoplus/xiaoniangaoplus_main/run_xnguser_recommend.py

@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/10/24
+import argparse
+import multiprocessing
+import os
+import random
+import sys
+import time
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+sys.path.append(os.getcwd())
+
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from xiaoniangaoplus.xiaoniangaoplus.xiaoniangao_plus_user import XiaoNianGaoPlusRecommend
+
+
+def run(log_type, crawler, rule_dict, our_uid, env):
+    XiaoNianGaoPlusRecommend.start_wechat(log_type=log_type,
+                                          crawler=crawler,
+                                          rule_dict=rule_dict,
+                                          our_uid=our_uid,
+                                          env=env)
+
+
+class Main:
+    @classmethod
+    def main(cls, log_type, crawler, topic_name, group_id, env):
+        consumer = get_consumer(topic_name, group_id)
+        # Long polling: if the topic has no message, the request is held on the
+        # server for up to wait_seconds before returning (service cap: 30 s).
+        wait_seconds = 30
+        # Messages consumed per request (service cap: 16).
+        batch = 1
+        Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                              f'WaitSeconds:{wait_seconds}\n'
+                                              f'TopicName:{topic_name}\n'
+                                              f'MQConsumer:{group_id}')
+        Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                               f'WaitSeconds:{wait_seconds}\n'
+                                               f'TopicName:{topic_name}\n'
+                                               f'MQConsumer:{group_id}')
+        while True:
+            try:
+                # Consume messages with long polling.
+                recv_msgs = consumer.consume_message(batch, wait_seconds)
+                for msg in recv_msgs:
+                    Common.logger(log_type, crawler).info(f"Receive\n"
+                                                          f"MessageId:{msg.message_id}\n"
+                                                          f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                          f"MessageTag:{msg.message_tag}\n"
+                                                          f"ConsumedTimes:{msg.consumed_times}\n"
+                                                          f"PublishTime:{msg.publish_time}\n"
+                                                          f"Body:{msg.message_body}\n"
+                                                          f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                          f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                          f"Properties:{msg.properties}")
+                    Common.logging(log_type, crawler, env, f"Receive\n"
+                                                           f"MessageId:{msg.message_id}\n"
+                                                           f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                           f"MessageTag:{msg.message_tag}\n"
+                                                           f"ConsumedTimes:{msg.consumed_times}\n"
+                                                           f"PublishTime:{msg.publish_time}\n"
+                                                           f"Body:{msg.message_body}\n"
+                                                           f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                           f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                           f"Properties:{msg.properties}")
+                    # ack_mq_message
+                    ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
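+                    # Note: the message is acked before the crawl starts, so a
+                    # crashed task will not be redelivered; presumably this keeps
+                    # the long-running crawl from exceeding the consume timeout.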
+
+                    # Crawler business logic
+                    task = task_fun_mq(msg.message_body)
+                    task_dict = task['task_dict']
+                    rule_dict = task['rule_dict']
+                    task_id = task_dict['id']
+                    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                    our_uid_list = [user["uid"] for user in user_list]
+                    our_uid = random.choice(our_uid_list)
+                    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                    Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                    Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                    Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
+                    Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                    Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+
+                    process = multiprocessing.Process(
+                        target=run,
+                        args=(log_type, crawler, rule_dict, our_uid, env)
+                    )
+                    process.start()
+                    print("进程开始")
+
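+                    # Watchdog: poll the crawler process once a minute for ~10
+                    # minutes; if it died, drop stale adb forwards and relaunch.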
+                    for i in range(10):
+                        if not process.is_alive():
+                            print("进程异常,准备重启")
+                            process.terminate()
+                            os.system("adb forward --remove-all")
+                            time.sleep(60)
+                            process = multiprocessing.Process(
+                                target=run,
+                                args=(log_type, crawler, rule_dict, our_uid, env)
+                            )
+                            process.start()
+                        time.sleep(60)
+
+                    Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                    Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+            except MQExceptionBase as err:
+                # No message available in the topic.
+                if err.type == "MessageNotExist":
+                    Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                    Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                    continue
+
+                Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+                Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+                time.sleep(2)
+                continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--log_type', type=str)
+    parser.add_argument('--crawler')
+    parser.add_argument('--topic_name')
+    parser.add_argument('--group_id')
+    parser.add_argument('--env')
+    args = parser.parse_args()
+    Main.main(log_type=args.log_type,
+              crawler=args.crawler,
+              topic_name=args.topic_name,
+              group_id=args.group_id,
+              env=args.env)