
刚刚都传 (ganggangdouchuan): optimization
Add the auto_add_author method

罗俊辉 1 year ago
parent
commit
1d83a1b343

+ 17 - 0
common/auto_add_author.py

@@ -0,0 +1,17 @@
+"""
+从推荐流中,获取账号信息,并且把账号信息自动化添加到 Mysql 数据库中
+"""
+
+from .db import MysqlHelper
+
+
+class AutoAddAuthor:
+    def __init__(self, rule, account):
+        self.rule = rule
+        self.account = account
+
+    def add_account(self):
+        return self.account
+
+    def judge_rule(self):
+        return self.rule
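
The new common/auto_add_author.py is only a skeleton at this point: both methods echo back the values passed to the constructor, and the imported MysqlHelper is not used yet. A minimal usage sketch, assuming the recommend crawler hands over its rule dict and an account payload extracted from the feed (the field names below are illustrative, not defined by this commit):

from common.auto_add_author import AutoAddAuthor

# Hypothetical payloads; this commit does not define their shape.
rule = {"min_fans": 1000}
account = {"uid": "10001", "nick_name": "示例账号"}

author = AutoAddAuthor(rule=rule, account=account)
if author.judge_rule():           # currently just returns the rule as-is
    added = author.add_account()  # currently just returns the account as-is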

+ 47 - 61
ganggangdouchuan/ganggangdouchuan_main/run_ggdc_recommend.py

@@ -13,7 +13,7 @@ from common.common import Common
 from common.public import get_consumer, ack_message, task_fun_mq, get_rule_from_mysql
 from common.scheduling_db import MysqlHelper
 from ganggangdouchuan.ganggangdouchuan_recommend import GGDCScheduling
-
+from common.aliyun_log import AliyunLogger
 
 class GGDCMain:
     @classmethod
@@ -24,43 +24,27 @@ class GGDCMain:
         wait_seconds = 30
         # 一次最多消费3条(最多可设置为16条)。
         batch = 1
-        Common.logger(log_type, crawler).info(
-            f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
-            f"WaitSeconds:{wait_seconds}\n"
-            f"TopicName:{topic_name}\n"
-            f"MQConsumer:{group_id}"
-        )
-        Common.logging(
-            log_type,
-            crawler,
-            env,
-            f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
-            f"WaitSeconds:{wait_seconds}\n"
-            f"TopicName:{topic_name}\n"
-            f"MQConsumer:{group_id}",
+        AliyunLogger.logging(
+            code="1000",
+            platform=crawler,
+            mode=log_type,
+            env=env,
+            message=f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                    f"WaitSeconds:{wait_seconds}\n"
+                    f"TopicName:{topic_name}\n"
+                    f"MQConsumer:{group_id}",
         )
         while True:
             try:
                 # 长轮询消费消息。
                 recv_msgs = consumer.consume_message(batch, wait_seconds)
                 for msg in recv_msgs:
-                    Common.logger(log_type, crawler).info(
-                        f"Receive\n"
-                        f"MessageId:{msg.message_id}\n"
-                        f"MessageBodyMD5:{msg.message_body_md5}\n"
-                        f"MessageTag:{msg.message_tag}\n"
-                        f"ConsumedTimes:{msg.consumed_times}\n"
-                        f"PublishTime:{msg.publish_time}\n"
-                        f"Body:{msg.message_body}\n"
-                        f"NextConsumeTime:{msg.next_consume_time}\n"
-                        f"ReceiptHandle:{msg.receipt_handle}\n"
-                        f"Properties:{msg.properties}"
-                    )
-                    Common.logging(
-                        log_type,
-                        crawler,
-                        env,
-                        f"Receive\n"
+                    AliyunLogger.logging(
+                        code="1000",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message=f"Receive\n"
                         f"MessageId:{msg.message_id}\n"
                         f"MessageBodyMD5:{msg.message_body_md5}\n"
                         f"MessageTag:{msg.message_tag}\n"
@@ -81,6 +65,9 @@ class GGDCMain:
 
                     # 处理爬虫业务
                     task_dict = task_fun_mq(msg.message_body)["task_dict"]
+                    AliyunLogger.logging(
+                        "1000", crawler, log_type, env, f"调度任务:{task_dict}"
+                    )
                     rule_dict = task_fun_mq(msg.message_body)["rule_dict"]
                     task_id = task_dict["id"]
                     select_user_sql = (
@@ -93,17 +80,12 @@ class GGDCMain:
                     for user in user_list:
                         our_uid_list.append(user["uid"])
                     our_uid = random.choice(our_uid_list)
-                    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-                    Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
-                    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
-                    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
-                    Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
-                    Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
-                    Common.logger(log_type, crawler).info(
-                        f'开始抓取:{task_dict["taskName"]}\n'
-                    )
-                    Common.logging(
-                        log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n'
+                    AliyunLogger.logging(
+                        code="1003",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message="成功获取信息,启动爬虫,开始一轮抓取",
                     )
                     new_r = get_rule_from_mysql(
                         task_id=task_id, log_type=log_type, crawler=crawler, env=env
@@ -112,8 +94,9 @@ class GGDCMain:
                     for item in new_r:
                         for k, val in item.items():
                             r_d[k] = val
-                    Common.logger(log_type, crawler).info(f"抓取规则:{r_d}")
-                    Common.logging(log_type, crawler, env, f"抓取规则:{r_d}")
+                    AliyunLogger.logging(
+                        "1000", crawler, log_type, env, f"抓取规则:{r_d}"
+                    )
                     # 初始化
                     GGDC = GGDCScheduling(
                         log_type=log_type,
@@ -131,28 +114,31 @@ class GGDCMain:
                         else:
                             GGDC.get_videoList(page_id=i + 1)
                             time.sleep(60)
-                    Common.logger(log_type, crawler).info("抓取一轮结束\n")
-                    Common.logging(log_type, crawler, env, "抓取一轮结束\n")
+                    AliyunLogger.logging(
+                        code="1004",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message="成功抓取完一轮",
+                    )
 
             except MQExceptionBase as err:
                 # Topic中没有消息可消费。
                 if err.type == "MessageNotExist":
-                    Common.logger(log_type, crawler).info(
-                        f"No new message! RequestId:{err.req_id}\n"
-                    )
-                    Common.logging(
-                        log_type,
-                        crawler,
-                        env,
-                        f"No new message! RequestId:{err.req_id}\n",
+                    AliyunLogger.logging(
+                        code="1000",
+                        platform=crawler,
+                        mode=log_type,
+                        env=env,
+                        message=f"No new message! RequestId:{err.req_id}\n",
                     )
                     continue
-
-                Common.logger(log_type, crawler).info(
-                    f"Consume Message Fail! Exception:{err}\n"
-                )
-                Common.logging(
-                    log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n"
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"Consume Message Fail! Exception:{err}\n",
                 )
                 time.sleep(2)
                 continue
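
Throughout this file the paired Common.logger(...).info(...) / Common.logging(...) calls are collapsed into single structured AliyunLogger.logging(...) calls. common/aliyun_log.py itself is not part of this commit; judging only from the call sites, the facade takes an event code plus platform, mode, env and a message, with optional data and trace_id (codes used in this commit: "1000" general info, "1002" item sent to ETL, "1003" round started, "1004" round finished, "2000" no-data or decode problems, "3000" per-video exceptions). A compatible stand-in, stated as an assumption rather than the actual module:

# Assumed shape of common/aliyun_log.py, reconstructed from the call sites above.
class AliyunLogger:
    @staticmethod
    def logging(code, platform, mode, env, message, data=None, trace_id=None):
        # A real implementation would ship these fields to Aliyun SLS (Log Service);
        # printing keeps the sketch self-contained.
        print(f"[{code}] platform={platform} mode={mode} env={env} "
              f"trace_id={trace_id} message={message} data={data}")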

+ 82 - 96
ganggangdouchuan/ganggangdouchuan_recommend/ganggangdouchuan_recommend2.py

@@ -6,6 +6,7 @@ import os
 import random
 import sys
 import time
+import uuid
 import requests
 from Crypto.Cipher import AES
 from Crypto.Hash import MD5
@@ -16,34 +17,9 @@ from common.mq import MQ
 
 sys.path.append(os.getcwd())
 from common.common import Common
-from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql, download_rule_v2
-
-proxies = {"http": None, "https": None}
-
-
-def clean_title(strings):
-    return (
-        strings.strip()
-        .replace("\n", "")
-        .replace("/", "")
-        .replace("\r", "")
-        .replace("#", "")
-        .replace(".", "。")
-        .replace("\\", "")
-        .replace("&NBSP", "")
-        .replace(":", "")
-        .replace("*", "")
-        .replace("?", "")
-        .replace("?", "")
-        .replace('"', "")
-        .replace("<", "")
-        .replace(">", "")
-        .replace("|", "")
-        .replace(" ", "")
-        .replace('"', "")
-        .replace("'", "")
-    )
+from common.aliyun_log import AliyunLogger
+from common.pipeline import PiaoQuanPipeline
+from common.public import clean_title
 
 
 def decrypt(a, e, n):
@@ -106,13 +82,6 @@ class GGDCScheduling:
         self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
         self.download_count = 0
 
-    def repeat_video(self, video_id):
-        sql = f""" select * from crawler_video where platform in ("{self.crawler}","{self.platform}") and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(
-            self.log_type, self.crawler, sql, self.env
-        )
-        return len(repeat_video)
-
     # 获取视频id_list
     def get_videoList(self, page_id):
         time.sleep(random.randint(5, 10))
@@ -122,15 +91,11 @@ class GGDCScheduling:
             "page": page_id,
             "timeline": 0,
             "version": "9.0.2",
-            # "timestamp": 1697700674000,
-            # "openid": "oNnpe5SwkfGtD5aJAaRbsIKIEdjc",
         }
         headers = {
             "Host": "ganggangdouchuan2.mengniu99.com",
-            # "Authorization": "oNnpe5SwkfGtD5aJAaRbsIKIEdjc",
             "xweb_xhr": "1",
             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 MicroMessenger/6.8.0(0x16080000) NetType/WIFI MiniProgramEnv/Mac MacWechat/WMPF XWEB/30817",
-            # "Sign": "88ab143a9401ebac3525562073248231",
             "Content-Type": "application/json",
             "Accept": "*/*",
             "Sec-Fetch-Site": "cross-site",
@@ -144,42 +109,82 @@ class GGDCScheduling:
             try:
                 response = requests.get(url, headers=headers, params=params)
                 decrypted_data = decrypt(
-                    response.json()["data"], response.json()["_yyy"], True
+                    response.json()["data"][:-2], response.json()["_yyy"], True
                 )
                 result = json.loads(decrypted_data)
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=self.crawler,
+                    mode=self.log_type,
+                    env=self.env,
+                    data={},
+                    message="开始抓取第{}页".format(page_id),
+                )
                 break
             except:
-                Common.logger(self.log_type, self.crawler).info("编码不对,解密失败, 等待10秒钟后重试\n")
-                # print("编码不对,解密失败, 等待10秒钟")
-                time.sleep(10)
+                AliyunLogger.logging(
+                    code="2000",
+                    platform=self.crawler,
+                    mode=self.log_type,
+                    env=self.env,
+                    data={},
+                    message="抓取第{}页,未获取数据,编码错误".format(page_id),
+                )
+                Common.logger(self.log_type, self.crawler).info("编码不对,解密失败\n")
+                return
 
-        if "totalCount" not in result or response.status_code != 200:
+        if "totalCount" not in result:
             Common.logger(self.log_type, self.crawler).info(
                 f"get_videoList:{response.text}\n"
             )
-            Common.logging(
-                self.log_type,
-                self.crawler,
-                self.env,
-                f"get_videoList:{response.text}\n",
+            AliyunLogger.logging(
+                code="2000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="抓取第{}页,未获取数据".format(page_id),
             )
             return
         elif len(result["videos"]) == 0:
             Common.logger(self.log_type, self.crawler).info(f"没有更多数据啦~\n")
-            Common.logging(self.log_type, self.crawler, self.env, f"没有更多数据啦~\n")
+            AliyunLogger.logging(
+                code="2000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="抓取第{}页,没有更多数据啦".format(page_id),
+            )
             return
         else:
             data_list = result["videos"]
-            for video_obj in data_list:
+            for index, video_obj in enumerate(data_list):
                 try:
                     self.process_video_obj(video_obj)
                 except Exception as e:
                     Common.logger(self.log_type, self.crawler).error(f"抓取单条视频异常:{e}\n")
-                    Common.logging(
-                        self.log_type, self.crawler, self.env, f"抓取单条视频异常:{e}\n"
+                    AliyunLogger.logging(
+                        code="3000",
+                        platform=self.crawler,
+                        mode=self.log_type,
+                        env=self.env,
+                        data=video_obj,
+                        message="抓取单条视频异常, 报错原因是: {}, 该视频位于第{}页{}条".format(
+                            e, page_id, index + 1
+                        ),
                     )
+            AliyunLogger.logging(
+                code="1000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="完成抓取第{}页".format(page_id),
+            )
 
     def process_video_obj(self, video_obj):
+        trace_id = self.platform + str(uuid.uuid1())
         video_id = video_obj.get("videoid", 0)
         video_title = clean_title(video_obj.get("title", "no title"))
         video_time = video_obj.get("v_time", 0)
@@ -199,6 +204,7 @@ class GGDCScheduling:
             "user_name": user_name,
             "publish_time_stamp": publish_time_stamp,
             "publish_time_str": publish_time_str,
+            "update_time_stamp": int(time.time()),
             "video_width": 0,
             "video_height": 0,
             "profile_id": 0,
@@ -206,48 +212,20 @@ class GGDCScheduling:
             "cover_url": video_obj["cover"],
             "session": f"ganggangdouchuan-{int(time.time())}",
         }
-        for k, v in video_dict.items():
-            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
-        Common.logging(self.log_type, self.crawler, self.env, f"{video_dict}")
-        # 过滤无效视频
-        if video_title == "" or video_dict["video_id"] == "":
-            Common.logger(self.log_type, self.crawler).info("无效视频\n")
-            Common.logging(self.log_type, self.crawler, self.env, "无效视频\n")
-            # 抓取基础规则过滤
-        elif (
-            download_rule_v2(
-                log_type=self.log_type,
-                crawler=self.crawler,
-                video_dict=video_dict,
-                rule_dict=self.rule_dict,
-            )
-            is False
-        ):
-            Common.logger(self.log_type, self.crawler).info("不满足抓取规则\n")
-            Common.logging(self.log_type, self.crawler, self.env, "不满足抓取规则\n")
-        elif (
-            any(
-                str(word) if str(word) in video_dict["video_title"] else False
-                for word in get_config_from_mysql(
-                    log_type=self.log_type,
-                    source=self.crawler,
-                    env=self.env,
-                    text="filter",
-                    action="",
-                )
-            )
-            is True
-        ):
-            Common.logger(self.log_type, self.crawler).info("已中过滤词\n")
-            Common.logging(self.log_type, self.crawler, self.env, "已中过滤词\n")
-        elif self.repeat_video(video_dict["video_id"]) != 0:
-            Common.logger(self.log_type, self.crawler).info("视频已下载\n")
-            Common.logging(self.log_type, self.crawler, self.env, "视频已下载\n")
-        else:
+        video_dict["out_video_id"] = str(video_dict["video_id"])
+        rule_pipeline = PiaoQuanPipeline(
+            platform=self.platform,
+            mode=self.log_type,
+            rule_dict=self.rule_dict,
+            env=self.env,
+            item=video_dict,
+            trace_id=trace_id
+        )
+        flag = rule_pipeline.process_item()
+        if flag:
             video_dict["out_user_id"] = video_dict["profile_id"]
             video_dict["platform"] = self.crawler
             video_dict["strategy"] = self.log_type
-            video_dict["out_video_id"] = str(video_dict["video_id"])
             video_dict["width"] = video_dict["video_width"]
             video_dict["height"] = video_dict["video_height"]
             video_dict["crawler_rule"] = json.dumps(self.rule_dict)
@@ -256,19 +234,27 @@ class GGDCScheduling:
             video_dict["video_url"] = find_tencent_url(video_obj["txvid"])
             video_dict["avatar_url"] = video_obj["avatarurl"]
             video_dict["cover_url"] = video_obj["cover"]
-            # print(json.dumps(video_dict, ensure_ascii=False, indent=4))
             self.download_count += 1
             self.mq.send_msg(video_dict)
+            AliyunLogger.logging(
+                code="1002",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data=video_dict,
+                trace_id=trace_id,
+                message="成功发送 MQ 至 ETL",
+            )
 
 
 if __name__ == "__main__":
     ZL = GGDCScheduling(
         log_type="recommend",
-        crawler="ggdc",
+        crawler="ganggangdouchuan",
         rule_dict={},
         our_uid="luojunhuihaoshuai",
-        env="dev",
+        env="prod",
     )
-    for i in range(5):
+    for i in range(1):
         ZL.get_videoList(page_id=i + 1)
         print(ZL.download_count)
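
In process_video_obj, the inline checks removed by this diff (empty title or video id, download_rule_v2, the MySQL filter-word list, and the repeat_video duplicate lookup) are folded into a single PiaoQuanPipeline.process_item() call that returns a truthy flag when the item may go downstream. common/pipeline.py is not shown in this commit, so the outline below is reconstructed from the removed code, not the actual implementation:

# Sketch only: the checks are inferred from the code this diff removes.
class PiaoQuanPipeline:
    def __init__(self, platform, mode, rule_dict, env, item, trace_id):
        self.platform = platform
        self.mode = mode
        self.rule_dict = rule_dict
        self.env = env
        self.item = item
        self.trace_id = trace_id

    def process_item(self):
        # 1. drop items with an empty title or out_video_id
        if not self.item.get("video_title") or not self.item.get("out_video_id"):
            return False
        # 2. apply the per-task rule_dict (duration, play count, publish time, ...)
        # 3. drop titles containing words from the platform's filter list
        # 4. drop out_video_ids already present in crawler_video
        return True

When the flag is true, the caller enriches video_dict, sends it to the ETL topic via MQ, and logs code "1002" together with the per-item trace_id.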