@@ -0,0 +1,155 @@
+import json
+import time
+import uuid
+
+import requests
+
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.mq import MQ
+from common.public import download_rule, get_config_from_mysql
+
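+# Mapping both schemes to None makes requests bypass any system proxy and connect directly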
+proxies = {"http": None, "https": None}
+
+
+class ZfshRecommend:
+    platform = "祝福生活视频"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
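+        # Count how many rows crawler_video already has for this out_video_id; 0 means it is new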
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
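+        # Page through the recommend feed; every item that survives the checks below is queued for ETL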
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        uuid1 = str(uuid.uuid1())
+        for page in range(1, 100):
+            try:
+                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
+                url = "https://mini.vvuiiu.cn/article/getArticleList"
+                payload = json.dumps({
+                    "page": page,
+                    "size": 5,
+                    "category_id": "393774",  # category id for the "daily recommend" feed
+                    "from_type": 1,
+                    "uuid": uuid1,
+                    # "openid": "oY5tI5FgvH9Jmc8cntj81t5Ugsds",
+                    "platform": 35,
+                    "appid": "wxa903dc775e85eb5b"
+                })
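+                # headers imitate the WeChat mini-program webview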
+                headers = {
+                    'Host': 'mini.vvuiiu.cn',
+                    'accept': '*/*',
+                    'content-type': 'application/json',
+                    'accept-language': 'zh-cn',
+                    'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
+                    'referer': 'https://servicewechat.com/wxa903dc775e85eb5b/11/page-frame.html'
+                }
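+                # verify=False disables TLS certificate verification for this call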
+                r = requests.post(url=url, headers=headers, data=payload, proxies=proxies, verify=False)
+
+                if "data" not in r.text or r.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
+                    return
+                elif "data" not in r.json():
+                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
+                    return
+                elif "list" not in r.json()["data"]:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
+                    return
+                elif len(r.json()["data"]["list"]) == 0:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
+                    return
+                else:
+                    # video list
+                    feeds = r.json()["data"]["list"]
+                    for i in range(len(feeds)):
+                        try:
+                            # strip newlines and characters that are unsafe in file names
+                            video_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                                .replace("/", "").replace("\\", "").replace("\r", "") \
+                                .replace(":", "").replace("*", "").replace("?", "") \
+                                .replace("?", "").replace('"', "").replace("<", "") \
+                                .replace(">", "").replace("|", "").replace(" ", "") \
+                                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+                                .replace("'", "").replace("#", "").replace("Merge", "")
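+                            # article_id doubles as the publish time (unix seconds)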
+                            publish_time_stamp = int(feeds[i].get("article_id", 0))
+                            publish_time_str = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time_stamp))
+                            video_dict = {
+                                "video_title": video_title,
+                                "video_id": feeds[i]["id"],  # video id
+                                "publish_time_stamp": publish_time_stamp,
+                                "publish_time_str": publish_time_str,
+                                "is_video": int(feeds[i].get("is_video", 0)),  # item type
+                                "category_id": int(feeds[i].get("category_id", 0)),  # video source (featured recommendations)
+                                "image_path": feeds[i].get("image_path", ""),  # video cover image
+                                "video_url": feeds[i].get("video_url", ""),  # video URL
+                                "click": int(feeds[i].get("click", 0)),  # click count
+                                "video_width": int(feeds[i].get("vw", 0)),
+                                "video_height": int(feeds[i].get("vh", 0)),
+                                "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
+                                "user_id": feeds[i].get("openid", ""),
+                                "play_cnt": 0,
+                                "like_cnt": 0,
+                                "comment_cnt": 0,
+                                "share_cnt": 0,
+                                # "duration": feeds[i].get("mediaDuration", 0),
+                                "session": ""
+                            }
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
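+                            # discard invalid items, rule failures, filter-word hits and known duplicates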
+                            if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
+                                Common.logger(log_type, crawler).info("无效视频\n")
+                                Common.logging(log_type, crawler, env, "无效视频\n")
+                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
+                                               rule_dict=rule_dict) is False:
+                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                            elif any(str(word) in video_dict["video_title"]
+                                     for word in get_config_from_mysql(log_type=log_type,
+                                                                       source=crawler,
+                                                                       env=env,
+                                                                       text="filter",
+                                                                       action="")):
+                                Common.logger(log_type, crawler).info('已中过滤词\n')
+                                Common.logging(log_type, crawler, env, '已中过滤词\n')
+                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                                Common.logging(log_type, crawler, env, '视频已下载\n')
+                            else:
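+                                # rename fields to the ETL schema and enqueue the item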
+                                video_dict["out_user_id"] = video_dict["user_id"]
+                                video_dict["platform"] = crawler
+                                video_dict["strategy"] = log_type
+                                video_dict["out_video_id"] = video_dict["video_id"]
+                                video_dict["width"] = video_dict["video_width"]
+                                video_dict["height"] = video_dict["video_height"]
+                                video_dict["crawler_rule"] = json.dumps(rule_dict)
+                                video_dict["user_id"] = our_uid
+                                video_dict["publish_time"] = video_dict["publish_time_str"]
+                                mq.send_msg(video_dict)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
+
+
+if __name__ == "__main__":
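+    # ad-hoc run against the dev environment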
+    rule_dict1 = {"period": {"min": 365, "max": 365},
+                  "duration": {"min": 30, "max": 1800},
+                  "favorite_cnt": {"min": 0, "max": 0},
+                  "videos_cnt": {"min": 10, "max": 20},
+                  "share_cnt": {"min": 0, "max": 0}}
+    ZfshRecommend.get_videoList("recommend", "zhufushenghuo", "16QspO", rule_dict1, 'dev')