
Add the "花好月圆中老年" (Huahaoyueyuan Zhonglaonian, "middle-aged and elderly") crawler and related code

罗俊辉 1 year ago
parent commit
51e950f408

+ 0 - 0
huahaoyueyuanzhonglaonian/__init__.py


+ 0 - 0
huahaoyueyuanzhonglaonian/huahaoyueyuanzhonglaonian_main/__init__.py


+ 158 - 0
huahaoyueyuanzhonglaonian/huahaoyueyuanzhonglaonian_main/run_hhyyzln_recommend.py

@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# @Author: luojunhui
+# @Time: 2023/10/10
+import argparse
+import os
+import random
+import sys
+import time
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+sys.path.append(os.getcwd())
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from huahaoyueyuanzhonglaonian.huahaoyueyuanzhonglaonian_recommend import HHYYZLNScheduling
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # Long polling: if the Topic has no messages, the request is held on the
+    # server for up to wait_seconds and returns as soon as a message becomes
+    # consumable. 30 seconds is the maximum the service allows.
+    wait_seconds = 30
+    # Consume at most this many messages per request (the service allows up to 16).
+    batch = 1
+    Common.logger(log_type, crawler).info(
+        f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+        f"WaitSeconds:{wait_seconds}\n"
+        f"TopicName:{topic_name}\n"
+        f"MQConsumer:{group_id}"
+    )
+    Common.logging(
+        log_type,
+        crawler,
+        env,
+        f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+        f"WaitSeconds:{wait_seconds}\n"
+        f"TopicName:{topic_name}\n"
+        f"MQConsumer:{group_id}",
+    )
+    while True:
+        try:
+            # Consume messages via long polling.
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                Common.logger(log_type, crawler).info(
+                    f"Receive\n"
+                    f"MessageId:{msg.message_id}\n"
+                    f"MessageBodyMD5:{msg.message_body_md5}\n"
+                    f"MessageTag:{msg.message_tag}\n"
+                    f"ConsumedTimes:{msg.consumed_times}\n"
+                    f"PublishTime:{msg.publish_time}\n"
+                    f"Body:{msg.message_body}\n"
+                    f"NextConsumeTime:{msg.next_consume_time}\n"
+                    f"ReceiptHandle:{msg.receipt_handle}\n"
+                    f"Properties:{msg.properties}"
+                )
+                Common.logging(
+                    log_type,
+                    crawler,
+                    env,
+                    f"Receive\n"
+                    f"MessageId:{msg.message_id}\n"
+                    f"MessageBodyMD5:{msg.message_body_md5}\n"
+                    f"MessageTag:{msg.message_tag}\n"
+                    f"ConsumedTimes:{msg.consumed_times}\n"
+                    f"PublishTime:{msg.publish_time}\n"
+                    f"Body:{msg.message_body}\n"
+                    f"NextConsumeTime:{msg.next_consume_time}\n"
+                    f"ReceiptHandle:{msg.receipt_handle}\n"
+                    f"Properties:{msg.properties}",
+                )
+                # Acknowledge the messages up front so MQ does not redeliver them.
+                ack_message(
+                    log_type=log_type,
+                    crawler=crawler,
+                    recv_msgs=recv_msgs,
+                    consumer=consumer,
+                )
+
+                # Run the crawler task
+                task_info = task_fun_mq(msg.message_body)
+                task_dict = task_info["task_dict"]
+                rule_dict = task_info["rule_dict"]
+                task_id = task_dict["id"]
+                select_user_sql = (
+                    f"""select * from crawler_user_v3 where task_id={task_id}"""
+                )
+                user_list = MysqlHelper.get_values(
+                    log_type, crawler, select_user_sql, env, action=""
+                )
+                Common.logger(log_type, crawler).info(f"Scheduled task:\n{task_dict}")
+                Common.logging(log_type, crawler, env, f"Scheduled task:{task_dict}")
+                Common.logger(log_type, crawler).info(f"Crawl rules:\n{rule_dict}")
+                Common.logging(log_type, crawler, env, f"Crawl rules:{rule_dict}")
+                Common.logger(log_type, crawler).info(f"User list:\n{user_list}")
+                Common.logging(log_type, crawler, env, f"User list:\n{user_list}")
+                Common.logger(log_type, crawler).info(f'Start crawling: {task_dict["taskName"]}\n')
+                Common.logging(
+                    log_type, crawler, env, f'Start crawling: {task_dict["taskName"]}\n'
+                )
+                # Pick a random publish uid from the task's user list.
+                our_uid_list = [user["uid"] for user in user_list]
+                our_uid = random.choice(our_uid_list)
+                HH = HHYYZLNScheduling(
+                    log_type=log_type,
+                    crawler=crawler,
+                    rule_dict=rule_dict,
+                    env=env,
+                    our_uid=our_uid,
+                )
+                # Crawl up to 10 pages, stopping once the per-round quota is met.
+                for i in range(10):
+                    if HH.download_count >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
+                        HH.download_count = 0
+                        break
+                    HH.get_videoList(i + 1, 10)
+                    time.sleep(60)
+                Common.logger(log_type, crawler).info("Crawl round finished\n")
+                Common.logging(log_type, crawler, env, "Crawl round finished\n")
+
+        except MQExceptionBase as err:
+            # No messages to consume in the Topic.
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(
+                    f"No new message! RequestId:{err.req_id}\n"
+                )
+                Common.logging(
+                    log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n"
+                )
+                continue
+
+            Common.logger(log_type, crawler).info(
+                f"Consume Message Fail! Exception:{err}\n"
+            )
+            Common.logging(
+                log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n"
+            )
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # Create the argument parser
+    parser.add_argument("--log_type", type=str)  # Add an argument and its type
+    parser.add_argument("--crawler")
+    parser.add_argument("--topic_name")
+    parser.add_argument("--group_id")
+    parser.add_argument("--env")
+    args = parser.parse_args()  # Values are supplied from the command line
+    main(
+        log_type=args.log_type,
+        crawler=args.crawler,
+        topic_name=args.topic_name,
+        group_id=args.group_id,
+        env=args.env,
+    )
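
For reference, a minimal sketch of starting this consumer directly from Python instead of via the CLI flags above, assuming the repository root is on sys.path; the topic_name and group_id values are placeholders for illustration, not the production configuration:

    from huahaoyueyuanzhonglaonian.huahaoyueyuanzhonglaonian_main.run_hhyyzln_recommend import main

    # Placeholder topic/group values; substitute the real MQ configuration.
    main(
        log_type="recommend",
        crawler="huahaoyueyuanzhonglaonian",
        topic_name="example_task_topic",
        group_id="example_group_id",
        env="dev",
    )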

+ 1 - 0
huahaoyueyuanzhonglaonian/huahaoyueyuanzhonglaonian_recommend/__init__.py

@@ -0,0 +1 @@
+from .huahaoyueyuanzhonglaonian_scheduling import HHYYZLNScheduling

+ 223 - 0
huahaoyueyuanzhonglaonian/huahaoyueyuanzhonglaonian_recommend/huahaoyueyuanzhonglaonian_scheduling.py

@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# @Author: luojunhui
+# @Time: 2023/10/10
+import json
+import os
+import random
+import sys
+import time
+import requests
+from datetime import datetime
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.mq import MQ
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql, download_rule
+
+proxies = {"http": None, "https": None}
+
+
+def clean_title(strings):
+    # Strip whitespace and characters that break filenames or downstream parsing.
+    return (
+        strings.strip()
+        .replace("\n", "")
+        .replace("/", "")
+        .replace("\r", "")
+        .replace("#", "")
+        .replace(".", "。")
+        .replace("\\", "")
+        .replace("&NBSP", "")
+        .replace(":", "")
+        .replace("*", "")
+        .replace("?", "")
+        .replace("?", "")
+        .replace('"', "")
+        .replace("<", "")
+        .replace(">", "")
+        .replace("|", "")
+        .replace(" ", "")
+        .replace("'", "")
+    )
+
+
+class HHYYZLNScheduling:
+    def __init__(self, log_type, crawler, rule_dict, env, our_uid):
+        self.platform = "花好月圆中老年"
+        self.log_type = log_type
+        self.crawler = crawler
+        self.rule_dict = rule_dict
+        self.env = env
+        self.our_uid = our_uid
+        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
+        self.download_count = 0
+
+    def repeat_video(self, video_id):
+        # Dedup check: has this out_video_id already been collected for this platform?
+        sql = f""" select * from crawler_video where platform in ("{self.crawler}","{self.platform}") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(
+            self.log_type, self.crawler, sql, self.env
+        )
+        return len(repeat_video)
+
+    # Fetch one page of the recommended video feed
+    def get_videoList(self, page_id, page_limit):
+        # Random delay between requests to reduce pressure on the endpoint
+        time.sleep(random.randint(5, 10))
+        url = "https://www.angjukk.cn/index/home/get_home_list.html"
+        headers = {
+            "host": "www.angjukk.cn",
+            "xweb_xhr": "1",
+            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 MicroMessenger/6.8.0(0x16080000) NetType/WIFI MiniProgramEnv/Mac MacWechat/WMPF XWEB/30817",
+            "content-type": "application/x-www-form-urlencoded",
+            "accept": "*/*",
+            "sec-fetch-site": "cross-site",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-dest": "empty",
+            "referer": "https://servicewechat.com/wx742465c143d1bd2b/2/page-frame.html",
+            "accept-language": "en",
+        }
+        data = {
+            # "time" and "str_data" look like constants captured from a real
+            # mini-program request; their exact semantics are not documented here.
+            "time": "1696991482000",
+            "str_data": "A7ZHUDdb",
+            "page": str(page_id),
+            "limit": str(page_limit),
+            "appid": "wx742465c143d1bd2b",
+            "version": "1.4.2",
+            "openid": "ogEOH5cHAMpi8qrWle_vjtaqT6zw",
+        }
+        response = requests.post(url, headers=headers, data=data)
+        if response.status_code != 200 or "data" not in response.text:
+            Common.logger(self.log_type, self.crawler).info(
+                f"get_videoList:{response.text}\n"
+            )
+            Common.logging(
+                self.log_type,
+                self.crawler,
+                self.env,
+                f"get_videoList:{response.text}\n",
+            )
+            return
+        elif len(response.json()["data"]["video_list"]) == 0:
+            Common.logger(self.log_type, self.crawler).info("No more data~\n")
+            Common.logging(self.log_type, self.crawler, self.env, "No more data~\n")
+            return
+        else:
+            data_list = response.json()["data"]["video_list"]["data"]
+            for video_obj in data_list:
+                try:
+                    self.process_video_obj(video_obj)
+                except Exception as e:
+                    Common.logger(self.log_type, self.crawler).error(
+                        f"Exception while processing a single video: {e}\n"
+                    )
+                    Common.logging(
+                        self.log_type,
+                        self.crawler,
+                        self.env,
+                        f"Exception while processing a single video: {e}\n",
+                    )
+
+    def process_video_obj(self, video_obj):
+        video_id = video_obj.get("id", 0)
+        video_title = clean_title(video_obj.get("title", "no title"))
+        video_time = video_obj.get("v_time", 0)
+        # "createtime" is a date string like "2023-10-10"; convert it to a Unix
+        # timestamp. The default must be a parseable string, not 0.
+        create_time_str = video_obj.get("createtime", "1970-01-01")
+        date_object = datetime.strptime(create_time_str, "%Y-%m-%d")
+        publish_time_stamp = int(time.mktime(date_object.timetuple()))
+        publish_time_str = time.strftime(
+            "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
+        )
+        user_name = ""
+        video_dict = {
+            "video_title": video_title,
+            "video_id": video_id,
+            "duration": video_time,
+            "play_cnt": video_obj.get("browse", 0),
+            "like_cnt": 0,
+            "comment_cnt": 0,
+            "share_cnt": 0,
+            "user_name": user_name,
+            "publish_time_stamp": publish_time_stamp,
+            "publish_time_str": publish_time_str,
+            "video_width": 0,
+            "video_height": 0,
+            "profile_id": 0,
+            "profile_mid": 0,
+            # "cover_url": "",
+            "session": f"huahaoyueyuanzhonglaonian-{int(time.time())}",
+        }
+        for k, v in video_dict.items():
+            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
+        Common.logging(
+            self.log_type, self.crawler, self.env, f"{video_dict}"
+        )
+        # Filter out invalid videos
+        if video_title == "" or video_dict["video_id"] == "":
+            Common.logger(self.log_type, self.crawler).info("Invalid video\n")
+            Common.logging(self.log_type, self.crawler, self.env, "Invalid video\n")
+        # Apply the base download rules
+        elif not download_rule(
+            log_type=self.log_type,
+            crawler=self.crawler,
+            video_dict=video_dict,
+            rule_dict=self.rule_dict,
+        ):
+            Common.logger(self.log_type, self.crawler).info("Does not meet the download rules\n")
+            Common.logging(
+                self.log_type, self.crawler, self.env, "Does not meet the download rules\n"
+            )
+        # Skip videos whose title contains a configured filter word
+        elif any(
+            str(word) in video_dict["video_title"]
+            for word in get_config_from_mysql(
+                log_type=self.log_type,
+                source=self.crawler,
+                env=self.env,
+                text="filter",
+                action="",
+            )
+        ):
+            Common.logger(self.log_type, self.crawler).info("Title hit a filter word\n")
+            Common.logging(self.log_type, self.crawler, self.env, "Title hit a filter word\n")
+        elif self.repeat_video(video_dict["video_id"]) != 0:
+            Common.logger(self.log_type, self.crawler).info("Video already downloaded\n")
+            Common.logging(self.log_type, self.crawler, self.env, "Video already downloaded\n")
+        else:
+            # Normalize field names expected by the ETL pipeline, then publish.
+            video_dict["out_user_id"] = video_dict["profile_id"]
+            video_dict["platform"] = self.crawler
+            video_dict["strategy"] = self.log_type
+            video_dict["out_video_id"] = str(video_dict["video_id"])
+            video_dict["width"] = video_dict["video_width"]
+            video_dict["height"] = video_dict["video_height"]
+            video_dict["crawler_rule"] = json.dumps(self.rule_dict)
+            video_dict["user_id"] = self.our_uid
+            video_dict["publish_time"] = video_dict["publish_time_str"]
+            video_dict["video_url"] = video_obj["url"]
+            video_dict["avatar_url"] = video_obj["thumb"]
+            video_dict["cover_url"] = video_obj["share_thumb"]
+            self.download_count += 1
+            self.mq.send_msg(video_dict)
+
+
+if __name__ == "__main__":
+    ZL = HHYYZLNScheduling(
+        log_type="recommend",
+        crawler="hhyyzln",
+        rule_dict={},
+        our_uid="luojunhuihaoshuai",
+        env="dev",
+    )
+    for i in range(4):
+        ZL.get_videoList(page_id=i + 1, page_limit=10)
+        print(ZL.download_count)