
美好星河 code optimization

zhangyong 1 year ago
parent
commit
047518e6c0

+ 1 - 1
kaixinxingfudaowanjia/kaixinxingfudaowanjia_main/run_kxxfdwj_recommend.py

@@ -109,7 +109,7 @@ def main(log_type, crawler, topic_name, group_id, env):
                     platform=crawler,
                     mode=log_type,
                     env=env,
-                    message="完成抓取——经典福气旺",
+                    message="完成抓取——开心幸福到万家",
                 )
                 AliyunLogger.logging(
                     code="1004", platform=crawler, mode=log_type, env=env,message="结束一轮抓取"

File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231408.chlsj


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231409.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231410.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231412.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231413.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311231414.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311301125.txt


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311301126.chlsj


File diff suppressed because it is too large
+ 0 - 0
kanyikan/kanyikan_recommend/kanyikan/chlsfiles/charles202311301127.chlsj


+ 72 - 67
meihaoxinghe/meihaoxinghe_main/run_mhxh_recommend.py

@@ -1,16 +1,15 @@
 import argparse
-import random
-
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_consumer import *
 from mq_http_sdk.mq_exception import MQExceptionBase
 
 
 sys.path.append(os.getcwd())
-from common.common import Common
 from common.public import task_fun_mq, get_consumer, ack_message
 from common.scheduling_db import MysqlHelper
-from meihaoxinghe.meihaoxinghe_recommend.meihaoxinghe_recommend import MHXHspcheduling
+from common import AliyunLogger
+from kaixinxingfudaowanjia.kaixinxingfudaowanjia_recommend.kaixinxingfudaowanjia_recommend import Kxxfdwjspcheduling
+
 
 def main(log_type, crawler, topic_name, group_id, env):
     consumer = get_consumer(topic_name, group_id)
@@ -19,17 +18,12 @@ def main(log_type, crawler, topic_name, group_id, env):
     wait_seconds = 30
     # 一次最多消费3条(最多可设置为16条)。
     batch = 1
-    Common.logger(log_type, crawler).info(
-        f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
-        f"WaitSeconds:{wait_seconds}\n"
-        f"TopicName:{topic_name}\n"
-        f"MQConsumer:{group_id}"
-    )
-    Common.logging(
-        log_type,
-        crawler,
-        env,
-        f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+    AliyunLogger.logging(
+        code="1000",
+        platform=crawler,
+        mode=log_type,
+        env=env,
+        message=f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
         f"WaitSeconds:{wait_seconds}\n"
         f"TopicName:{topic_name}\n"
         f"MQConsumer:{group_id}",
@@ -39,24 +33,12 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
-                xng_author_start_time = int(time.time())
-                Common.logger(log_type, crawler).info(
-                    f"Receive\n"
-                    f"MessageId:{msg.message_id}\n"
-                    f"MessageBodyMD5:{msg.message_body_md5}\n"
-                    f"MessageTag:{msg.message_tag}\n"
-                    f"ConsumedTimes:{msg.consumed_times}\n"
-                    f"PublishTime:{msg.publish_time}\n"
-                    f"Body:{msg.message_body}\n"
-                    f"NextConsumeTime:{msg.next_consume_time}\n"
-                    f"ReceiptHandle:{msg.receipt_handle}\n"
-                    f"Properties:{msg.properties}"
-                )
-                Common.logging(
-                    log_type,
-                    crawler,
-                    env,
-                    f"Receive\n"
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"Receive\n"
                     f"MessageId:{msg.message_id}\n"
                     f"MessageBodyMD5:{msg.message_body_md5}\n"
                     f"MessageTag:{msg.message_tag}\n"
@@ -74,10 +56,25 @@ def main(log_type, crawler, topic_name, group_id, env):
                     recv_msgs=recv_msgs,
                     consumer=consumer,
                 )
-
-                # 处理爬虫业务
+                # 解析 task_dict
                 task_dict = task_fun_mq(msg.message_body)["task_dict"]
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"调度任务:{task_dict}",
+                )
+                # 解析 rule_dict
                 rule_dict = task_fun_mq(msg.message_body)["rule_dict"]
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"抓取规则:{rule_dict}\n",
+                )
+                # 解析 user_list
                 task_id = task_dict["id"]
                 select_user_sql = (
                     f"""select * from crawler_user_v3 where task_id={task_id}"""
@@ -85,48 +82,56 @@ def main(log_type, crawler, topic_name, group_id, env):
                 user_list = MysqlHelper.get_values(
                     log_type, crawler, select_user_sql, env, action=""
                 )
-                Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
-                Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
-                Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
-                Common.logging(log_type, crawler, env, f"用户列表:\n{user_list}")
-                Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
-                Common.logging(
-                    log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n'
+                AliyunLogger.logging(
+                    code="1003",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message="开始抓取"
                 )
-                our_uid_list = []
-                for user in user_list:
-                    our_uid_list.append(user["uid"])
-                our_uid = random.choice(our_uid_list)
-                HH = MHXHspcheduling(
-                    log_type=log_type,
-                    crawler=crawler,
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message="开始抓取美好星河——推荐",
+                )
+                main_process = Kxxfdwjspcheduling(
+                    platform=crawler,
+                    mode=log_type,
                     rule_dict=rule_dict,
+                    user_list=user_list,
+                    env=env
+                )
+                main_process.get_video_list()
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=crawler,
+                    mode=log_type,
                     env=env,
-                    our_uid=our_uid
+                    message="完成抓取——美好星河",
+                )
+                AliyunLogger.logging(
+                    code="1004", platform=crawler, mode=log_type, env=env,message="结束一轮抓取"
                 )
-
-                HH.get_videoList()
-                Common.logger(log_type, crawler).info("抓取一轮结束\n")
-                Common.logging(log_type, crawler, env, "抓取一轮结束\n")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
-                Common.logger(log_type, crawler).info(
-                    f"No new message! RequestId:{err.req_id}\n"
-                )
-                Common.logging(
-                    log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n"
+                AliyunLogger.logging(
+                    code="2000",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    message=f"No new message! RequestId:{err.req_id}\n",
                 )
                 continue
-
-            Common.logger(log_type, crawler).info(
-                f"Consume Message Fail! Exception:{err}\n"
-            )
-            Common.logging(
-                log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n"
+            AliyunLogger.logging(
+                code="2000",
+                platform=crawler,
+                mode=log_type,
+                env=env,
+                message=f"Consume Message Fail! Exception:{err}\n",
             )
             time.sleep(2)
             continue
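
The runner above swaps the paired Common.logger / Common.logging calls for single structured AliyunLogger.logging calls. Below is a minimal, hypothetical stand-in that only mirrors the keyword signature visible in this diff (code, platform, mode, env, message, data); the real common.AliyunLogger presumably ships these records to Aliyun Log Service rather than printing them.

import json
import time


class AliyunLogger:
    """Hypothetical stand-in; mirrors only the call signature used in the diff above."""

    @staticmethod
    def logging(code, platform, mode, env, message, data=None):
        # Build one structured record per call. The code values are the status
        # strings passed in the diff (e.g. "1000", "1003", "1004", "2000", "3000").
        record = {
            "timestamp": int(time.time()),
            "code": code,
            "platform": platform,   # crawler name, e.g. "meihaoxinghe"
            "mode": mode,           # log_type, e.g. "recommend"
            "env": env,             # "dev" or "prod"
            "message": message,
            "data": data,
        }
        print(json.dumps(record, ensure_ascii=False))


if __name__ == "__main__":
    AliyunLogger.logging(
        code="1003", platform="meihaoxinghe", mode="recommend", env="dev",
        message="开始抓取",
    )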

+ 130 - 207
meihaoxinghe/meihaoxinghe_recommend/meihaoxinghe_recommend.py

@@ -1,112 +1,30 @@
-# -*- coding: utf-8 -*-
-# @Author: zhangyong
-# @Time: 2023/12/07
-import json
 import os
+import json
 import random
 import sys
 import time
-import requests
-from hashlib import md5
-from datetime import datetime
+import uuid
 
-from common import get_redirect_url
-from common.mq import MQ
+import requests
 
 sys.path.append(os.getcwd())
-from common.common import Common
-from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql, download_rule
-
-proxies = {"http": None, "https": None}
-
-
-def clean_title(strings):
-    return (
-        strings.strip()
-        .replace("\n", "")
-        .replace("/", "")
-        .replace("\r", "")
-        .replace("#", "")
-        .replace(".", "。")
-        .replace("\\", "")
-        .replace("&NBSP", "")
-        .replace(":", "")
-        .replace("*", "")
-        .replace("?", "")
-        .replace("?", "")
-        .replace('"', "")
-        .replace("<", "")
-        .replace(">", "")
-        .replace("|", "")
-        .replace(" ", "")
-        .replace('"', "")
-        .replace("'", "")
-    )
+from common.video_item import VideoItem
+from common import PiaoQuanPipeline, AliyunLogger, tunnel_proxies
+from common.mq import MQ
 
 
-class MHXHspcheduling:
-    def __init__(self, log_type, crawler, rule_dict, env, our_uid):
-        self.platform = "美好星河"
-        self.log_type = log_type
-        self.crawler = crawler
+class MHXHspcheduling(object):
+    def __init__(self, platform, mode, rule_dict, user_list, env):
+        self.platform = platform
+        self.mode = mode
         self.rule_dict = rule_dict
+        self.user_list = user_list
         self.env = env
-        self.our_uid = our_uid
+        self.download_cnt = 0
         self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
-        self.download_count = 0
-
-    def repeat_video(self, video_id):
-        sql = f""" select * from crawler_video where platform in ("{self.crawler}","{self.platform}") and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(
-            self.log_type, self.crawler, sql, self.env
-        )
-        return len(repeat_video)
+        self.limit_flag = False
 
-    # 获取视频id_list
-    def get_videoList(self):
-        for i in range(1, 10):
-            time.sleep(random.randint(5, 10))
-            url = "https://app.miguoyun.cn/app/index.php?i=959&t=1&m=jyt_txvideo&v=31.1.11&from=wxapp&c=entry&a=wxapp&do=videolist&sign=bf3a8068467ce73c96a0409ae1136c4f"
-            payload = "category=476&page=1&israndom=1&type=0&isview=&noauth=true"
-            headers = {
-                'Host': 'app.miguoyun.cn',
-                'accept': '*/*',
-                'content-type': 'application/x-www-form-urlencoded',
-                'accept-language': 'zh-cn',
-                'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
-                'referer': 'https://servicewechat.com/wx08c7ede18f448973/20/page-frame.html',
-                'Cookie': '0a52___multiid=1; 0a52_logout=; PHPSESSID=d7eaef273b29b6e2a2b0130b0bfb92bc'
-            }
-
-            response = requests.post(url, headers=headers, data=payload)
-            if "data" not in response.text or response.status_code != 200:
-                Common.logger(self.log_type, self.crawler).info(
-                    f"get_videoList:{response.text}\n"
-                )
-                Common.logging(
-                    self.log_type,
-                    self.crawler,
-                    self.env,
-                    f"get_videoList:{response.text}\n",
-                )
-                return
-            elif len(response.json()["data"]) == 0:
-                Common.logger(self.log_type, self.crawler).info(f"没有更多数据啦~\n")
-                Common.logging(self.log_type, self.crawler, self.env, f"没有更多数据啦~\n")
-                return
-            else:
-                data_list = response.json()["data"]
-                for video_obj in data_list:
-                    try:
-                        self.process_video_obj(video_obj)
-                    except Exception as e:
-                        Common.logger(self.log_type, self.crawler).error(f"抓取单条视频异常:{e}\n")
-                        Common.logging(
-                            self.log_type, self.crawler, self.env, f"抓取单条视频异常:{e}\n"
-                        )
-
-    def get_video_list(self, video_id):
+    def video_list(self, video_id):
         url = "https://app.miguoyun.cn/app/index.php?i=959&t=0&m=jyt_txvideo&v=31.1.11&from=wxapp&c=entry&a=wxapp&do=videoinfo&vid={}&version=1.0.3".format(
             video_id)
         headers = {
@@ -120,124 +38,129 @@ class MHXHspcheduling:
         }
 
         response = requests.post(url, headers=headers)
-        if "data" not in response.text or response.status_code != 200:
-            Common.logger(self.log_type, self.crawler).info(
-                f"get_videoList:{response.text}\n"
-            )
-            Common.logging(
-                self.log_type,
-                self.crawler,
-                self.env,
-                f"get_videoList:{response.text}\n",
-            )
-            return
-        elif len(response.json()["data"]) == 0:
-            Common.logger(self.log_type, self.crawler).info(f"详情页数据为空~\n")
-            Common.logging(self.log_type, self.crawler, self.env, f"详情页数据为空~\n")
-            return
-        else:
-            data_list = response.json()["data"]
-            return data_list
-
+        data_list = response.json()["data"]
+        return data_list
 
-    def process_video_obj(self, video_obj):
-        video_id = video_obj.get("vid", 0)
-        get_video_list = self.get_video_list(video_id)
-        video_title = clean_title(get_video_list.get("vtitle", "no title"))
-        video_time = video_obj.get("create_time", 0)
-        publish_time_stamp = int(video_time)
-        publish_time_str = time.strftime(
-            "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
-        )
-        user_name = ""
-        video_dict = {
-            "video_title": video_title,
-            "video_id": video_id,
-            "duration": video_time,
-            "play_cnt": video_obj.get("visited", 0),
-            "like_cnt": 0,
-            "comment_cnt": 0,
-            "share_cnt": video_obj.get("shared", 0),
-            "user_name": user_name,
-            "publish_time_stamp": publish_time_stamp,
-            "publish_time_str": publish_time_str,
-            "video_width": 0,
-            "video_height": 0,
-            "profile_id": 0,
-            "profile_mid": 0,
-            # "cover_url": "",
-            "session": f"meihaoxinghe-{int(time.time())}",
+    def get_video_list(self):
+        url = "https://app.miguoyun.cn/app/index.php?i=959&t=1&m=jyt_txvideo&v=31.1.11&from=wxapp&c=entry&a=wxapp&do=videolist&sign=bf3a8068467ce73c96a0409ae1136c4f"
+        headers = {
+            'Host': 'app.miguoyun.cn',
+            'accept': '*/*',
+            'content-type': 'application/x-www-form-urlencoded',
+            'accept-language': 'zh-cn',
+            'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
+            'referer': 'https://servicewechat.com/wx08c7ede18f448973/20/page-frame.html',
+            'Cookie': '0a52___multiid=1; 0a52_logout=; PHPSESSID=d7eaef273b29b6e2a2b0130b0bfb92bc'
         }
-        for k, v in video_dict.items():
-            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
-        Common.logging(
-            self.log_type, self.crawler, self.env, f"{video_dict}"
-        )
-        # 过滤无效视频
-        if video_title == "" or video_dict["video_id"] == "":
-            Common.logger(self.log_type, self.crawler).info("无效视频\n")
-            Common.logging(self.log_type, self.crawler, self.env, "无效视频\n")
-            # 抓取基础规则过滤
-        elif (
-                download_rule(
-                    log_type=self.log_type,
-                    crawler=self.crawler,
-                    video_dict=video_dict,
-                    rule_dict=self.rule_dict,
-                )
-                is False
-        ):
-            Common.logger(self.log_type, self.crawler).info("不满足抓取规则\n")
-            Common.logging(
-                self.log_type, self.crawler, self.env, "不满足抓取规则\n"
-            )
-        elif (
-                any(
-                    str(word)
-                    if str(word) in video_dict["video_title"]
-                    else False
-                    for word in get_config_from_mysql(
-                        log_type=self.log_type,
-                        source=self.crawler,
+        page_index = 1
+        while True:
+            time.sleep(random.randint(1, 10))
+            try:
+                if self.limit_flag:
+                    AliyunLogger.logging(
+                        code="2000",
+                        platform=self.platform,
+                        mode=self.mode,
                         env=self.env,
-                        text="filter",
-                        action="",
+                        message="本轮已经抓取到足够的数据,自动退出\t{}".format(self.download_cnt),
                     )
+                    return
+                else:
+                    payload = "category=476&page=1&israndom=1&type=0&isview=&noauth=true"
+
+                    response = requests.post(url, headers=headers, data=payload)
+                    video_list = response.json()["data"]
+                    if video_list:
+                        for index, video_obj in enumerate(video_list, 1):
+                            try:
+                                video_id = video_obj.get("vid", 0)
+                                get_video_list = self.video_list(video_id)
+
+                                if get_video_list.get("vtitle"):
+                                    AliyunLogger.logging(
+                                        code="1001",
+                                        platform=self.platform,
+                                        mode=self.mode,
+                                        env=self.env,
+                                        message="扫描到一条视频",
+                                        data=video_obj,
+                                    )
+                                    self.process_video_obj(video_obj, get_video_list)
+                            except Exception as e:
+                                AliyunLogger.logging(
+                                    code="3000",
+                                    platform=self.platform,
+                                    mode=self.mode,
+                                    env=self.env,
+                                    data=video_obj,
+                                    message="抓取第{}条的时候出现问题, 报错信息是{}".format(index, e),
+                                )
+                            page_index += 1
+                    else:
+                        AliyunLogger.logging(
+                            code="2000",
+                            platform=self.platform,
+                            mode=self.mode,
+                            env=self.env,
+                            message="已经抓完了,自动退出"
+                        )
+                        return
+            except Exception as e:
+                AliyunLogger.logging(
+                    code="3000",
+                    platform=self.platform,
+                    mode=self.mode,
+                    env=self.env,
+                    message="抓取第{}页时候出现错误, 报错信息是{}".format(page_index + 1, e),
                 )
-                is True
-        ):
-            Common.logger(self.log_type, self.crawler).info("已中过滤词\n")
-            Common.logging(self.log_type, self.crawler, self.env, "已中过滤词\n")
-        elif self.repeat_video(video_dict["video_id"]) != 0:
-            Common.logger(self.log_type, self.crawler).info("视频已下载\n")
-            Common.logging(self.log_type, self.crawler, self.env, "视频已下载\n")
-        else:
-            video_url = get_video_list['res']
-            video_url = get_redirect_url(video_url)
 
-            video_dict["out_user_id"] = video_dict["profile_id"]
-            video_dict["platform"] = self.crawler
-            video_dict["strategy"] = self.log_type
-            video_dict["out_video_id"] = str(video_dict["video_id"])
-            video_dict["width"] = video_dict["video_width"]
-            video_dict["height"] = video_dict["video_height"]
-            video_dict["crawler_rule"] = json.dumps(self.rule_dict)
-            video_dict["user_id"] = self.our_uid
-            video_dict["publish_time"] = video_dict["publish_time_str"]
-            video_dict["video_url"] = video_url
-            video_dict["avatar_url"] = video_obj['poster']
-            video_dict["cover_url"] = video_obj['poster']
-            self.download_count += 1
-            self.mq.send_msg(video_dict)
+    def process_video_obj(self, video_obj, get_video_list):
+        trace_id = self.platform + str(uuid.uuid1())
+        our_user = random.choice(self.user_list)
+        item = VideoItem()
+        item.add_video_info("user_id", our_user["uid"])
+        item.add_video_info("user_name", our_user["nick_name"])
+        item.add_video_info("video_id", video_obj["vid"])
+        item.add_video_info("video_title", video_obj["vtitle"])
+        item.add_video_info("publish_time_stamp", int(time.time()))
+        item.add_video_info("video_url", get_video_list["res"])
+        item.add_video_info("cover_url", video_obj["poster"])
+        item.add_video_info("out_video_id", video_obj["vid"])
+        item.add_video_info("platform", self.platform)
+        item.add_video_info("strategy", self.mode)
+        item.add_video_info("session", "{}-{}".format(self.platform, int(time.time())))
+        mq_obj = item.produce_item()
+        pipeline = PiaoQuanPipeline(
+            platform=self.platform,
+            mode=self.mode,
+            rule_dict=self.rule_dict,
+            env=self.env,
+            item=mq_obj,
+            trace_id=trace_id,
+        )
+        if pipeline.process_item():
+            self.download_cnt += 1
+            self.mq.send_msg(mq_obj)
+            AliyunLogger.logging(
+                code="1002",
+                platform=self.platform,
+                mode=self.mode,
+                env=self.env,
+                message="成功发送至 ETL",
+                data=mq_obj,
+            )
+            if self.download_cnt >= int(
+                self.rule_dict.get("videos_cnt", {}).get("min", 200)
+            ):
+                self.limit_flag = True
 
 
-if __name__ == "__main__":
-    ZL = MHXHspcheduling(
-        log_type="recommend",
-        crawler="mhxh",
+if __name__ == '__main__':
+    S = MHXHspcheduling(
+        platform="meihaoxinghe",
+        mode="recommend",
+        env="dev",
         rule_dict={},
-        our_uid="zhangyong",
-        env="dev"
+        user_list=[{'nick_name': "Ivring", 'uid': "1997"}, {'nick_name': "paul", 'uid': "1998"}]
     )
-    ZL.get_videoList()
-
+    S.get_video_list()
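
The rewritten MHXHspcheduling hands each scraped video to VideoItem, PiaoQuanPipeline and MQ instead of filtering inline. The stubs below are simplified, hypothetical stand-ins that only reproduce the call pattern visible in this diff (add_video_info -> produce_item -> PiaoQuanPipeline.process_item -> MQ.send_msg); the real classes in common/ additionally perform field validation, rule filtering and deduplication.

class VideoItem:
    """Hypothetical stand-in: collects video fields and returns them as a dict."""

    def __init__(self):
        self.item = {}

    def add_video_info(self, key, value):
        self.item[key] = value

    def produce_item(self):
        # The real implementation presumably normalizes and validates fields here.
        return dict(self.item)


class PiaoQuanPipeline:
    """Hypothetical stand-in for the filtering pipeline used in process_video_obj."""

    def __init__(self, platform, mode, rule_dict, env, item, trace_id):
        self.platform = platform
        self.mode = mode
        self.rule_dict = rule_dict
        self.env = env
        self.item = item
        self.trace_id = trace_id

    def process_item(self):
        # The real pipeline applies rule_dict limits and repeat-video checks;
        # this stub only rejects items missing a title or a playable url.
        return bool(self.item.get("video_title")) and bool(self.item.get("video_url"))


class MQ:
    """Hypothetical stand-in: the real MQ publishes to topic_crawler_etl_<env>."""

    def __init__(self, topic_name):
        self.topic_name = topic_name

    def send_msg(self, video_dict):
        print("send to {}: {}".format(self.topic_name, video_dict.get("video_title")))


if __name__ == "__main__":
    item = VideoItem()
    item.add_video_info("video_title", "demo")
    item.add_video_info("video_url", "https://example.com/v.mp4")
    mq_obj = item.produce_item()
    pipeline = PiaoQuanPipeline("meihaoxinghe", "recommend", {}, "dev", mq_obj, "trace-demo")
    if pipeline.process_item():
        MQ(topic_name="topic_crawler_etl_dev").send_msg(mq_obj)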

Some files were not shown because too many files changed in this diff