# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/13
import json
import os
import sys
import time
import uuid
from urllib import parse

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule
from common.aliyun_log import AliyunLogger

proxies = {"http": None, "https": None}


class BenshanzhufuRecommend:
    platform = "本山祝福"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Check whether this video is already in crawler_video; returns the number of matching rows
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # Fetch videos from the recommend feed
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        # Pagination state: visitor_key is returned by the API, page is incremented locally
        visitor_key = ""
        page = 1
        while True:
            # try:
            now = int(time.time() * 1000)
            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
            header = {
                "content-time": str(now),
                "chatKey": "wx0fb8149da961d3b0",
                "cache-time": str(now),
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
                              "MicroMessenger/8.0.20(0x1800142d) NetType/WIFI Language/zh_CN",
                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
            }
            # The API expects the JSON parameters URL-encoded and appended after "parameter="
            parameter = {
                "page": page,
                "ini_id": visitor_key
            }
            url = url + parse.quote(json.dumps(parameter))
            urllib3.disable_warnings()
            r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
            if r.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            resp = r.json()  # parse the body once instead of calling r.json() repeatedly
            if resp.get("message") != "list success":
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {resp}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            elif "data" not in resp:
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {resp}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            elif len(resp["data"]["list"]) == 0:
                Common.logger(log_type, crawler).info(f"没有更多数据了~ {resp}\n")
                Common.logging(log_type, crawler, env, f"没有更多数据了~ {resp}\n")
                return
            else:
                # Advance pagination with the visitor_key returned by this page
                visitor_key = resp["data"]["visitor_key"]
                page += 1
                feeds = resp["data"]["list"]
                for feed in feeds:
                    trace_id = crawler + str(uuid.uuid1())
                    AliyunLogger.logging(
                        code="1001",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        data=feed,
                        message="扫描到一条视频"
                    )
                    publish_time_stamp = feed.get("update_time", 0)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # Only accept direct .mp4 URLs; anything else is treated as missing
                    video_url = feed.get("video_url", "")
                    if ".mp4" not in video_url:
                        video_url = ""
                    video_dict = {
                        'video_title': feed.get("title", "").replace(" ", "").replace("'", "").replace('"', ""),
                        'video_id': str(feed.get("nid", "")),
                        'play_cnt': 0,
                        'comment_cnt': feed.get("commentCount", 0),
                        'like_cnt': 0,
                        'share_cnt': 0,
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': "本山祝福",
                        'user_id': "benshanzhufu",
                        'avatar_url': feed.get("video_cover", ""),
                        'cover_url': feed.get("video_cover", ""),
                        'video_url': video_url,
                        'session': f"benshanzhufu-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")

                    # Filter out invalid videos
                    if video_dict["video_id"] == "" or video_dict["cover_url"] == "" or video_dict["video_url"] == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                    elif any(str(word) in video_dict["video_title"] for word in get_config_from_mysql(log_type=log_type, source=crawler, env=env, text="filter", action="")):
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                        AliyunLogger.logging(
                            code="2002",
                            platform=crawler,
                            mode=log_type,
                            message="重复的视频",
                            data=video_dict,
                            trace_id=trace_id,
                            env=env
                        )
                    else:
                        # Fill in the extra fields the ETL pipeline expects before sending to MQ
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = 0
                        video_dict["height"] = 0
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = our_uid
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        video_dict["fans_cnt"] = 0
                        video_dict["videos_cnt"] = 0
                        mq.send_msg(video_dict)
                        AliyunLogger.logging(
                            code="1002",
                            platform=crawler,
                            mode=log_type,
                            message="成功发送至 ETL",
                            data=video_dict,
                            trace_id=trace_id,
                            env=env
                        )
            # except Exception as e:
            #     Common.logger(log_type, crawler).info(f"抓取单条视频异常:{e}\n")
        # except Exception as e:
        #     Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")


if __name__ == "__main__":
    print(get_config_from_mysql("recommend", "benshanzhufu", "dev", "filter"))