zhangyong 1 year ago
parent
commit
09f00c32f3

File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231411.txt


+ 27 - 0
main/process_offline.sh

@@ -109,6 +109,7 @@ if [[ "$time" > "07:00:00"  &&  "$time" < "08:59:59" || "$time" > "19:00:00"  &&
   ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zmyx | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_ppq | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_xngplus_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 小年糕+ 小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
@@ -150,6 +151,32 @@ else
 fi
 
 
+# 漂漂圈
+if [[ "$time" > "16:00:00"  &&  "$time" < "16:59:59" ]];then
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 漂漂圈 爬虫脚本任务" >> ${log_path}
+#  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
+#  ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_xngplus | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_xngrule | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_zmyx | grep -v grep | awk '{print $2}' | xargs kill -9
+  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
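+  # "grep -v grep" exits 1 when no process matches, so "$?" -eq 1 below means run_ppq_recommend.py is not running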
+  ps -ef | grep "run_ppq_recommend.py" | grep -v "grep"
+  if [ "$?" -eq 1 ];then
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 漂漂圈 小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
+    adb forward --remove-all
+    cd ${piaoquan_crawler_dir}
+    nohup python3 -u piaopiaoquan/piaopiaoquan_main/run_ppq_recommend.py --log_type="recommend" --crawler="piaopiaoquan" --env=${env} >>piaopiaoquan/logs/nohup-recommend.log 2>&1 &
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+  else
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 漂漂圈 程序爬虫, 进程状态正常" >> ${log_path}
+  fi
+
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 漂漂圈 爬虫脚本任务结束" >> ${log_path}
+fi
+
+
 
 
 #if [[ "$time" > "13:00:00"  &&  "$time" < "13:59:59" || "$time" > "16:00:00"  &&  "$time" < "18:59:59" || "$time" > "22:00:00"  &&  "$time" < "23:59:59" ]];then

+ 0 - 0
piaopiaoquan/__init__.py


+ 0 - 0
piaopiaoquan/piaopiaoquan/__init__.py


+ 393 - 0
piaopiaoquan/piaopiaoquan/piaopiaoquan_recommend.py

@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+# @Author: zhangyong
+# @Time: 2023/11/24
+import json
+import os
+import random
+import sys
+import time
+import uuid
+from hashlib import md5
+
+import requests
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from bs4 import BeautifulSoup
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.common.by import By
+import multiprocessing
+
+
+sys.path.append(os.getcwd())
+from common import AliyunLogger, PiaoQuanPipeline, get_redirect_url
+from common.common import Common
+from common.mq import MQ
+
+
+class PPQRecommend:
+    env = None
+    driver = None
+    log_type = None
+
+    def __init__(self, log_type, crawler, env, rule_dict, our_uid):
+        self.mq = None
+        self.platform = "piaopiaoquan"
+        self.download_cnt = 0
+        self.element_list = []
+        self.count = 0
+        self.swipe_count = 0
+        self.log_type = log_type
+        self.crawler = crawler
+        self.env = env
+        self.rule_dict = rule_dict
+        self.our_uid = our_uid
+        if self.env == "dev":
+            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_v111/chromedriver"
+        else:
+            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_v111/chromedriver"
+
+        Common.logger(self.log_type, self.crawler).info("启动微信")
+        # Common.logging(self.log_type, self.crawler, self.env, '启动微信')
+        # Appium desired capabilities for WeChat
+        caps = {
+            "platformName": "Android",
+            "devicesName": "Android",
+            # "platformVersion": "11",
+            # "udid": "emulator-5554",
+            "appPackage": "com.tencent.mm",
+            "appActivity": ".ui.LauncherUI",
+            "autoGrantPermissions": "true",
+            "noReset": True,
+            "resetkeyboard": True,
+            "unicodekeyboard": True,
+            "showChromedriverLog": True,
+            "printPageSourceOnFailure": True,
+            "recreateChromeDriverSessions": True,
+            "enableWebviewDetailsCollection": True,
+            "setWebContentsDebuggingEnabled": True,
+            "newCommandTimeout": 6000,
+            "automationName": "UiAutomator2",
+            "chromedriverExecutable": chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+        }
+        try:
+            self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        except Exception as e:
+            print(e)
+            AliyunLogger.logging(
+                code="3002",
+                platform=self.platform,
+                mode=self.log_type,
+                env=self.env,
+                message=f'appium 启动异常: {e}'
+            )
+            return
+        self.driver.implicitly_wait(30)
+
+        for i in range(120):
+            try:
+                if self.driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
+                    Common.logger(self.log_type, self.crawler).info("微信启动成功")
+                    # Common.logging(self.log_type, self.crawler, self.env, '微信启动成功')
+                    AliyunLogger.logging(
+                        code="1000",
+                        platform=self.platform,
+                        mode=self.log_type,
+                        env=self.env,
+                        message="启动微信成功"
+                    )
+                    break
+                elif self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
+                    Common.logger(self.log_type, self.crawler).info("发现并关闭系统下拉菜单")
+                    # Common.logging(self.log_type, self.crawler, self.env, '发现并关闭系统下拉菜单')
+                    AliyunLogger.logging(
+                        code="1000",
+                        platform=self.platform,
+                        mode=self.log_type,
+                        env=self.env,
+                        message="发现并关闭系统下拉菜单"
+                    )
+                    self.driver.find_element(By.ID, "com.android.system:id/dismiss_view").click()
+                else:
+                    pass
+            except NoSuchElementException:
+                AliyunLogger.logging(
+                    code="3001",
+                    platform=self.platform,
+                    mode=self.log_type,
+                    env=self.env,
+                    message="打开微信异常"
+                )
+                time.sleep(1)
+
+        Common.logger(self.log_type, self.crawler).info("下滑,展示小程序选择面板")
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                          int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        time.sleep(1)
+        Common.logger(self.log_type, self.crawler).info('打开小程序"漂漂圈丨福年"')
+        self.driver.find_elements(By.XPATH, '//*[@text="漂漂圈丨福年"]')[-1].click()
+        AliyunLogger.logging(
+            code="1000",
+            platform=self.platform,
+            env=self.env,
+            mode=self.log_type,
+            message="打开小程序漂漂圈丨福年成功"
+
+        )
+        time.sleep(5)
+        self.get_videoList()
+        time.sleep(1)
+        self.driver.quit()
+
+    def search_elements(self, xpath):
+        time.sleep(1)
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = self.driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
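+        # falls through: implicitly returns None when no window contains the xpath,
+        # which callers (check_to_applet, get_videoList) treat as "window gone"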
+
+    def check_to_applet(self, xpath):
+        time.sleep(1)
+        webViews = self.driver.contexts
+        self.driver.switch_to.context(webViews[-1])
+        windowHandles = self.driver.window_handles
+        for handle in windowHandles:
+            self.driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                self.driver.find_element(By.XPATH, xpath)
+                Common.logger(self.log_type, self.crawler).info("切换到WebView成功\n")
+                # Common.logging(self.log_type, self.crawler, self.env, '切换到WebView成功\n')
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=self.platform,
+                    mode=self.log_type,
+                    env=self.env,
+                    message="成功切换到 webview"
+                )
+                return
+            except NoSuchElementException:
+                time.sleep(1)
+
+    def swipe_up(self):
+        self.search_elements('//*[@class="dynamic--title-container"]')
+        size = self.driver.get_window_size()
+        self.driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
+                          int(size["width"] * 0.5), int(size["height"] * 0.442), 200)
+        self.swipe_count += 1
+
+    def get_video_url(self, video_title_element):
+        for i in range(3):
+            self.search_elements('//*[@class="dynamic--title-container"]')
+            Common.logger(self.log_type, self.crawler).info(f"video_title_element:{video_title_element[0]}")
+            time.sleep(1)
+            Common.logger(self.log_type, self.crawler).info("滑动标题至可见状态")
+            self.driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
+                                       video_title_element[0])
+            time.sleep(3)
+            Common.logger(self.log_type, self.crawler).info("点击标题")
+            video_title_element[0].click()
+            self.check_to_applet(xpath=r'//wx-video[@class="infos--title infos--ellipsis"]')
+            Common.logger(self.log_type, self.crawler).info("点击标题完成")
+            time.sleep(10)
+            video_url_elements = self.search_elements(
+                '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    def parse_detail(self, index):
+        page_source = self.driver.page_source
+        soup = BeautifulSoup(page_source, 'html.parser')
+        video_list = soup.findAll(name="wx-view", attrs={"class": "expose--adapt-parent"})
+        # may raise IndexError when fewer than index+1 cards have been rendered;
+        # the exception is caught upstream in get_video_info
+        return video_list[index]
+
+    def get_video_info_2(self, video_element):
+        Common.logger(self.log_type, self.crawler).info(f"本轮已抓取{self.download_cnt}条视频\n")
+        # Common.logging(self.log_type, self.crawler, self.env, f"本轮已抓取{self.download_cnt}条视频\n")
+        if self.download_cnt >= int(self.rule_dict.get("videos_cnt", {}).get("min", 10)):
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+        self.count += 1
+        Common.logger(self.log_type, self.crawler).info(f"第{self.count}条视频")
+        # Generate a trace_id and use it as the unique index for this video's lifecycle
+        trace_id = self.crawler + str(uuid.uuid1())
+        AliyunLogger.logging(
+            code="1001",
+            platform=self.platform,
+            mode=self.log_type,
+            env=self.env,
+            trace_id=trace_id,
+            message="扫描到一条视频",
+        )
+        # Title
+        video_title = video_element.find("wx-view", class_="dynamic--title").text
+        # Raw play-count string
+        play_str = video_element.find("wx-view", class_="dynamic--views").text
+        # Video duration
+        duration_str = video_element.find("wx-view", class_="dynamic--duration").text
+        # Author nickname
+        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
+        # Avatar URL
+        avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
+        # Cover URL
+        cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
+        play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
+        duration = int(duration_str.split(":")[0].strip()) * 60 + int(duration_str.split(":")[-1].strip())
+        out_video_id = md5(video_title.encode('utf8')).hexdigest()
+        out_user_id = md5(user_name.encode('utf8')).hexdigest()
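+        # NOTE: both IDs are md5 digests of display text, so two videos with the same
+        # title (or two authors with the same nickname) collide on the same ID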
+
+        video_dict = {
+            "video_title": video_title,
+            "video_id": out_video_id,
+            'out_video_id': out_video_id,
+            "duration_str": duration_str,
+            "duration": duration,
+            "play_str": play_str,
+            "play_cnt": play_cnt,
+            "like_str": "",
+            "like_cnt": 0,
+            "comment_cnt": 0,
+            "share_cnt": 0,
+            "user_name": user_name,
+            "user_id": out_user_id,
+            'publish_time_stamp': int(time.time()),
+            'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+            'update_time_stamp': int(time.time()),
+            "avatar_url": avatar_url,
+            "cover_url": cover_url,
+            "session": f"xiaoniangao-{int(time.time())}"
+        }
+        pipeline = PiaoQuanPipeline(
+            platform=self.crawler,
+            mode=self.log_type,
+            item=video_dict,
+            rule_dict=self.rule_dict,
+            env=self.env,
+            trace_id=trace_id
+        )
+        flag = pipeline.process_item()
+        if flag:
+            video_title_element = self.search_elements(f'//*[contains(text(), "{video_title}")]')
+            if video_title_element is None:
+                return
+            Common.logger(self.log_type, self.crawler).info("点击标题,进入视频详情页")
+            AliyunLogger.logging(
+                code="1000",
+                platform=self.platform,
+                mode=self.log_type,
+                env=self.env,
+                message="点击标题,进入视频详情页",
+            )
+            video_url = self.get_video_url(video_title_element)
+            video_url = get_redirect_url(video_url)
+            if video_url is None:
+                self.driver.press_keycode(AndroidKey.BACK)
+                time.sleep(5)
+                return
+            video_dict['video_url'] = video_url
+            video_dict["platform"] = self.crawler
+            video_dict["strategy"] = self.log_type
+            video_dict["out_video_id"] = video_dict["video_id"]
+            video_dict["crawler_rule"] = json.dumps(self.rule_dict)
+            video_dict["user_id"] = self.our_uid
+            video_dict["publish_time"] = video_dict["publish_time_str"]
+            self.mq.send_msg(video_dict)
+            self.download_cnt += 1
+            self.driver.press_keycode(AndroidKey.BACK)
+            time.sleep(5)
+
+    def get_video_info(self, video_element):
+        try:
+            self.get_video_info_2(video_element)
+        except Exception as e:
+            Common.logger(self.log_type, self.crawler).error(f"抓取单条视频异常:{e}\n")
+            self.driver.press_keycode(AndroidKey.BACK)
+            AliyunLogger.logging(
+                code="3001",
+                platform=self.platform,
+                mode=self.log_type,
+                env=self.env,
+                message=f"抓取单条视频异常:{e}\n"
+            )
+
+    def get_videoList(self):
+        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
+        self.driver.implicitly_wait(20)
+        # Switch to the WebView context
+        self.check_to_applet(xpath='//*[@class="expose--adapt-parent"]')
+        print("切换到 webview 成功")
+        time.sleep(1)
+        page = 0
+        if self.search_elements('//*[@class="expose--adapt-parent"]') is None:
+            Common.logger(self.log_type, self.crawler).info("窗口已销毁\n")
+            # Common.logging(self.log_type, self.crawler, self.env, '窗口已销毁\n')
+            AliyunLogger.logging(
+                code="3000",
+                platform=self.platform,
+                mode=self.log_type,
+                env=self.env,
+                message="窗口已销毁"
+            )
+            self.count = 0
+            self.download_cnt = 0
+            self.element_list = []
+            return
+
+        print("开始获取视频信息")
+        for i in range(50):
+            print("下滑{}次".format(i))
+            element = self.parse_detail(i)
+            self.get_video_info(element)
+            self.swipe_up()
+            time.sleep(1)
+            if self.swipe_count > 100:
+                return
+
+        print("下滑完成")
+        # time.sleep(100)
+        Common.logger(self.log_type, self.crawler).info("已抓取完一组,休眠 5 秒\n")
+        # Common.logging(self.log_type, self.crawler, self.env, "已抓取完一组,休眠 5 秒\n")
+        AliyunLogger.logging(
+            code="1000",
+            platform=self.platform,
+            mode=self.log_type,
+            env=self.env,
+            message="已抓取完一组,休眠 5 秒\n",
+        )
+        time.sleep(5)
+
+
+def run():
+    rule_dict1 = {"period": {"min": 365, "max": 365},
+                  "duration": {"min": 30, "max": 1800},
+                  "favorite_cnt": {"min": 0, "max": 0},
+                  "videos_cnt": {"min": 5000, "max": 0},
+                  "share_cnt": {"min": 0, "max": 0}}
+    PPQRecommend("recommend", "piaopiaoquan", "dev", rule_dict1, [64120158, 64120157, 63676778])
+
+
+if __name__ == "__main__":
+    process = multiprocessing.Process(
+        target=run
+    )
+    process.start()
+    while True:
+        if not process.is_alive():
+            print("正在重启")
+            process.terminate()
+            time.sleep(60)
+            os.system("adb forward --remove-all")
+            process = multiprocessing.Process(target=run)
+            process.start()
+        time.sleep(60)
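
The duration and play-count parsing in get_video_info_2 assumes strings shaped like "03:15" and "1234+次播放", and silently drops the hour field on an "h:mm:ss" input. A minimal, more defensive sketch; parse_duration and parse_play_cnt are hypothetical helpers, not part of this commit, and assume those same source formats:

def parse_duration(duration_str: str) -> int:
    # Handles "mm:ss" as well as "h:mm:ss" by folding each colon-separated field.
    seconds = 0
    for part in duration_str.split(":"):
        seconds = seconds * 60 + int(part.strip())
    return seconds

def parse_play_cnt(play_str: str) -> int:
    # Keeps only the digits, so "1234+次播放" -> 1234; returns 0 if none are found.
    digits = "".join(ch for ch in play_str if ch.isdigit())
    return int(digits) if digits else 0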

+ 0 - 0
piaopiaoquan/piaopiaoquan_main/__init__.py


+ 189 - 0
piaopiaoquan/piaopiaoquan_main/run_ppq_recommend.py

@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# @Author: luojunhui
+# @Time: 2023/9/27
+import argparse
+import os
+import random
+import sys
+import time
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+import multiprocessing
+
+
+sys.path.append(os.getcwd())
+from common.public import get_consumer, ack_message, task_fun_mq, get_rule_from_mysql
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common import AliyunLogger
+from piaopiaoquan.piaopiaoquan.piaopiaoquan_recommend import PPQRecommend
+
+
+
+def run(args1, args2, args3, args4, args5):
+    PPQRecommend(
+        log_type=args1,
+        crawler=args2,
+        env=args3,
+        rule_dict=args4,
+        our_uid=args5
+    )
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    # NOTE: the CLI-supplied topic_name and group_id are overridden with hardcoded prod values
+    topic_name = "ppq_recommend_prod"
+    group_id = "ppq_recommend_prod"
+    consumer = get_consumer(topic_name, group_id)
+    # Long polling: if the topic has no messages, the request is held server-side for up to
+    # wait_seconds and returns as soon as a message arrives. 30s is the maximum allowed.
+    wait_seconds = 30
+    # Consume at most 1 message per call (the maximum allowed is 16).
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    AliyunLogger.logging(
+        code="1000",
+        platform=log_type,
+        mode=crawler,
+        env=env,
+        message=f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                f"WaitSeconds:{wait_seconds}\n"
+                f"TopicName:{topic_name}\n"
+                f"MQConsumer:{group_id}",
+    )
+    while True:
+        try:
+            # Consume messages via long polling.
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=log_type,
+                    mode=crawler,
+                    env=env,
+                    message=f"Receive\n"
+                            f"MessageId:{msg.message_id}\n"
+                            f"MessageBodyMD5:{msg.message_body_md5}\n"
+                            f"MessageTag:{msg.message_tag}\n"
+                            f"ConsumedTimes:{msg.consumed_times}\n"
+                            f"PublishTime:{msg.publish_time}\n"
+                            f"Body:{msg.message_body}\n"
+                            f"NextConsumeTime:{msg.next_consume_time}\n"
+                            f"ReceiptHandle:{msg.receipt_handle}\n"
+                            f"Properties:{msg.properties}",
+                )
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # Handle the crawler task: parse the message body once
+                task = task_fun_mq(msg.message_body)
+                task_dict = task['task_dict']
+                rule_dict = task['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = [user["uid"] for user in user_list]
+                our_uid = random.choice(our_uid_list)
+                AliyunLogger.logging(
+                    "1000", log_type, crawler, env, f"调度任务:{task_dict}"
+                )
+                AliyunLogger.logging(
+                    "1000", log_type, crawler, env, f"抓取规则:{rule_dict}\n"
+                )
+                Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                new_r = get_rule_from_mysql(task_id=task_id, log_type=log_type, crawler=crawler, env=env)
+                r_d = {}
+                for item in new_r:
+                    for k, val in item.items():
+                        r_d[k] = val
+
+                process = multiprocessing.Process(
+                    target=run,
+                    args=(log_type, crawler, env, r_d, our_uid)
+                )
+                process.start()
+                AliyunLogger.logging(
+                    code="1003",
+                    platform=log_type,
+                    mode=crawler,
+                    env=env,
+                    message="成功获取信息,启动爬虫,开始一轮抓取",
+                )
+                print("进程开始")
+                while True:
+                    if not process.is_alive():
+                        print("正在重启")
+                        process.terminate()
+                        os.system("adb forward --remove-all")
+                        time.sleep(60)
+                        new_r = get_rule_from_mysql(task_id=task_id, log_type=log_type, crawler=crawler, env=env)
+                        r_d = {}
+                        for item in new_r:
+                            for k, val in item.items():
+                                r_d[k] = val
+                        Common.logger(log_type, crawler).info(f'抓取规则:{r_d}')
+                        Common.logging(log_type, crawler, env, f"抓取规则:{r_d}")
+                        process = multiprocessing.Process(target=run, args=(log_type, crawler, env, r_d, our_uid))
+                        process.start()
+                        AliyunLogger.logging(
+                            code="1004",
+                            platform=log_type,
+                            mode=crawler,
+                            env=env,
+                            message="成功抓取完一轮",
+                        )
+                    time.sleep(60)
+        except MQExceptionBase as err:
+            # No messages available to consume in the topic.
+            if err.type == "MessageNotExist":
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=log_type,
+                    mode=crawler,
+                    env=env,
+                    message=f"No new message! RequestId:{err.req_id}\n",
+                )
+                Common.logging(
+                    log_type=log_type,
+                    crawler=crawler,
+                    env=env,
+                    message=f"No new message! RequestId:{err.req_id}\n",
+                )
+                continue
+            AliyunLogger.logging(
+                code="1000",
+                platform=log_type,
+                mode=crawler,
+                env=env,
+                message=f"Consume Message Fail! Exception:{err}\n",
+            )
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # build the CLI argument parser
+    parser.add_argument('--log_type', type=str)
+    parser.add_argument('--crawler')
+    parser.add_argument('--topic_name')
+    parser.add_argument('--group_id')
+    parser.add_argument('--env')
+    args = parser.parse_args()  # values can also be supplied from the terminal
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)
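
Both this entry point and the __main__ block of piaopiaoquan_recommend.py repeat the same keep-alive loop. A minimal sketch of that shared pattern, assuming the same os, time, and multiprocessing imports; watchdog is a hypothetical helper, not part of this commit:

import multiprocessing
import os
import time

def watchdog(target, args, interval=60):
    # Restart `target` in a fresh process whenever it dies; clear stale adb
    # port forwards first so the next Appium session can bind cleanly.
    proc = multiprocessing.Process(target=target, args=args)
    proc.start()
    while True:
        if not proc.is_alive():
            os.system("adb forward --remove-all")
            time.sleep(interval)
            proc = multiprocessing.Process(target=target, args=args)
            proc.start()
        time.sleep(interval)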

Some files were not shown because too many files changed in this diff