import json
import os
import random
import sys
import time
import traceback
import uuid
from datetime import datetime, timedelta

import requests

sys.path.append(os.getcwd())

from application.common.feishu import FsData
from application.common.feishu.feishu_utils import FeishuUtils
from application.common.gpt import GPT4oMini
from application.common.log import AliyunLogger, Local
from application.common.messageQueue import MQ
from application.functions.zqkd_db_redis import DatabaseOperations, RedisOperations
from application.items import VideoItem
from application.pipeline import PiaoQuanPipeline


class ZhongQingKanDianAuthor:
    """Crawler that walks the zhongqingkandian author (blogger) video lists,
    fetches per-video detail, filters by the configured rules and pushes
    accepted videos to the ETL message queue."""

    API_BASE_URL = "http://8.217.192.46:8889"
    COMMON_HEADERS = {"Content-Type": "application/json"}
    # Maximum number of retries per API request.
    MAX_RETRIES = 3
    # Per-request timeout in seconds.
    TIMEOUT = 30

    def __init__(self, platform, mode, rule_dict, user_list, env="prod"):
        """
        :param platform: platform name, e.g. "zhongqingkandian"
        :param mode: run mode, e.g. "author"
        :param rule_dict: rule dict with video-count / duration limits, e.g.
            {"videos_cnt": {"min": 100, "max": 0}, "duration": {"min": 30, "max": 1200}}
        :param user_list: list of our-side user dicts (uid / nick_name / link)
        :param env: runtime environment, defaults to "prod"
        """
        self.limit_flag = True
        self.platform = platform
        self.mode = mode
        self.rule_dict = rule_dict
        self.user_list = user_list
        self.env = env
        self.download_cnt = 0
        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
        self.expire_flag = False
        self.aliyun_log = AliyunLogger(mode=self.mode, platform=self.platform)
        self.db_ops = DatabaseOperations(mode=mode, platform=platform)
        self.redis_ops = RedisOperations(mode=mode, platform=platform)
        data_rule = FsData()
        self.title_rule = data_rule.get_title_rule()
        self.LocalLog = Local.logger(self.platform, self.mode)
        # Page cursor for the author video-list API (1-based).
        self.curses = 1
        # NOTE(review): result of get_last_scanned_id() is fetched but the
        # resume-from-last-user logic below is currently disabled — confirm
        # whether resuming is still wanted before removing the call.
        result = self.redis_ops.get_last_scanned_id()
        self.session = requests.session()

    def __del__(self):
        # Guard with getattr: if __init__ raised before `session` was
        # assigned, the attribute does not exist and __del__ must not crash.
        session = getattr(self, "session", None)
        if session:
            self.LocalLog.info("session 被正确关闭")
            session.close()

    def send_request(self, path, data):
        """POST `data` to API `path` with retries.

        Returns the decoded JSON response on success (code == 0), or None
        when code 29036 is returned (no retry) or all retries fail.
        """
        for attempt in range(self.MAX_RETRIES):
            try:
                response = self.session.post(
                    f"{self.API_BASE_URL}{path}",
                    data=data,
                    timeout=self.TIMEOUT,
                    headers=self.COMMON_HEADERS,
                )
                resp_data = response.json()
                # A response without `code` is malformed — force a retry.
                if 'code' not in resp_data:
                    self.LocalLog.warning(f"{path}响应缺少code字段,尝试重试")
                    raise ValueError("Missing 'code' in response")
                code = resp_data['code']
                # Success.
                if code == 0:
                    self.LocalLog.info(f"{path}请求成功:{resp_data}")
                    return resp_data
                # Specific error code that must NOT be retried.
                if code == 29036:
                    self.LocalLog.warning(f"{path}返回code:29036,消息:{resp_data}")
                    return None
                # Any other error code: log and fall through to retry.
                self.LocalLog.warning(f"{path}返回错误码{code},尝试重试,响应内容:{resp_data}")
            except Exception as e:
                tb_info = traceback.format_exc()
                self.LocalLog.error(f"{path}请求异常: {str(e)} \n {tb_info}")
            if attempt < self.MAX_RETRIES - 1:
                time.sleep(random.randint(5, 10))
        # All retries exhausted.
        self.LocalLog.error(f"{path}达到最大重试次数")
        self.aliyun_log.logging(
            code="3000",
            message=f"请求 {path} 失败,达到最大重试次数",
            data=data,
        )
        return None

    def req_user_list(self, account_id):
        """Request one page of an author's video list.

        :param account_id: author account id to scan
        :return: decoded response dict, or None on failure
        """
        try:
            url = '/crawler/zhong_qing_kan_dian/blogger'
            body = json.dumps({
                "account_id": f"{account_id}",
                "content_type": "全部",
                "cursor": f"{self.curses}",
            })
            self.LocalLog.info(f"开始请求用户视频列表{body}")
            resp = self.send_request(url, body)
            return resp
        except Exception as e:
            tb_info = traceback.format_exc()
            self.aliyun_log.logging(
                code="1004",
                message=f"请求相关推荐视频列表时发生异常,错误信息: {str(e)}",
                data={"url": url},
            )
            self.LocalLog.info(f"请求相关推荐视频列表 {url} 时发生异常:{e} \n{tb_info}")
            return None

    def req_detail(self, content_link, **kwargs):
        """Fetch detail for one content link; if it is a video, merge the
        extra list-level fields from `kwargs` and hand off to
        process_video_obj. Non-video content is skipped.

        :param content_link: share URL of the content
        :param kwargs: extra fields from the list item, merged into detail
        """
        try:
            self.LocalLog.info(f"开始请求视频详情,链接: {content_link}")
            url = '/crawler/zhong_qing_kan_dian/detail'
            body = json.dumps({
                "content_link": content_link,
            })
            resp = self.send_request(url, body)
            if not resp:
                return
            data = resp.get("data", {}).get("data", {})
            if data.get("content_type") != "video":
                self.aliyun_log.logging(
                    code="3003",
                    message=f"跳过非视频内容",
                    data={"content_link": content_link},
                )
                self.LocalLog.info(f"跳过非视频内容,链接: {content_link}")
                return
            self.LocalLog.info(f"{content_link} 是视频")
            data.update(kwargs)
            self.process_video_obj(data)
        except Exception as e:
            tb_info = traceback.format_exc()
            self.aliyun_log.logging(
                code="1005",
                message=f"请求视频详情时发生异常,错误信息: {str(e)}",
                data={"content_link": content_link},
            )
            self.LocalLog.error(f"请求视频详情,链接 {content_link} 时发生异常:{e} \n{tb_info}")

    def control_request_author(self):
        """Main crawl loop: iterate DB users, fetch each author's video
        list page, and process every video until the daily quota is hit,
        the time budget runs out, or limit_flag is cleared."""
        while self.limit_flag:
            try:
                self.download_cnt = self.db_ops.get_today_videos()
                if self.download_cnt >= self.rule_dict.get("videos_cnt", {}).get("min", 100):
                    self.aliyun_log.logging(
                        code="2010",
                        message=f"今日已经达到最大量",
                        data=self.download_cnt,
                    )
                    self.LocalLog.info(f"当日视频已达到最大爬取量{self.download_cnt}")
                    return
                self.LocalLog.info(f"开始用户视频列表的请求和处理流程,今日已爬 {self.download_cnt} 个视频")
                if not self.db_ops.select_user(0):
                    self.LocalLog.info("没有用户数据")
                    time.sleep(10)
                    continue
                for user_info in self.db_ops.select_user(0):
                    if not self.limit_flag:
                        self.aliyun_log.logging(
                            code="2010",
                            message=f"今日已经达到最大量",
                            data=self.download_cnt,
                        )
                        self.LocalLog.info("视频数量已达到预期")
                        return
                    # Stop when less than 30 minutes remain before midnight.
                    if is_less_than_30_minutes():
                        self.LocalLog.info("时间已不足,停止执行")
                        return
                    current_id, user_id = user_info
                    author_resp = self.req_user_list(user_id)
                    # Persist progress so a restart resumes from this user.
                    self.redis_ops.set_last_scanned_id(current_id)
                    if not author_resp:
                        continue
                    author_data = author_resp.get("data", {})
                    # Skip when there is no next page (use .get to avoid a
                    # KeyError on responses that omit the field).
                    if not author_data.get("next_cursor"):
                        continue
                    video_data = author_data.get("data", [])
                    self.LocalLog.info(f"用户{user_id}第{self.curses}页数据长度{len(video_data)}")
                    for video_obj in video_data:
                        video_content_link = video_obj.get("share_url")
                        if video_content_link:
                            self.req_detail(video_content_link, **video_obj)
                    time.sleep(random.randint(5, 10))
                # All users scanned for this cursor: advance to the next page.
                self.curses += 1
            except Exception as e:
                tb_info = traceback.format_exc()
                self.aliyun_log.logging(
                    code="3009",
                    message=f"控制相关推荐视频请求和处理时发生异常,错误信息: {str(e)}",
                    data={},
                )
                self.LocalLog.info(f"控制相关推荐视频请求和处理时发生异常:\n{tb_info}")

    def process_video_obj(self, video_obj):
        """Validate one video object (dedup, duration rules), run it through
        the pipeline, optionally rewrite its title via GPT, record it in
        Feishu, and publish it to the ETL queue.

        :param video_obj: merged detail + list fields for one video
        """
        try:
            video_duration = video_obj["video_url_list"][0]['video_duration']
            video_id = video_obj['channel_content_id']
            # Deduplicate on video id via Redis.
            if self.redis_ops.check_video_id_exists(video_id):
                self.aliyun_log.logging(
                    code="3004",
                    message=f"重复视频ID:{video_id}",
                )
                self.LocalLog.info(f"重复视频ID: {video_id}")
                return
            our_user = random.choice(self.user_list)
            trace_id = self.platform + str(uuid.uuid1())
            item = VideoItem()
            # NOTE(review): the user upsert (check_user_id / update_user /
            # insert_user + redis add_user_data) is currently disabled,
            # pending a move to a Redis-based check.
            if video_duration > self.rule_dict.get("duration", {}).get("max", 1200) \
                    or video_duration < self.rule_dict.get("duration", {}).get("min", 30):
                self.aliyun_log.logging(
                    code="3005",
                    message=f"视频时长不满足条件[>=30s&<=1200s]视频ID:{video_obj['channel_content_id']},视频时长:{video_duration}",
                )
                self.LocalLog.info(
                    f"视频时长不满足条件,视频ID: {video_obj['channel_content_id']}, 视频时长: {video_duration}")
                return
            item.add_video_info("video_id", video_obj['channel_content_id'])
            item.add_video_info("video_title", video_obj["title"])
            item.add_video_info("play_cnt", self.convert_number(video_obj["read_num"]))
            # publish_timestamp arrives in milliseconds; store seconds.
            item.add_video_info("publish_time_stamp", int(int(video_obj["publish_timestamp"]) / 1000))
            item.add_video_info("out_user_id", video_obj["channel_account_id"])
            item.add_video_info("cover_url", video_obj["image_url_list"][0]['image_url'])
            item.add_video_info("like_cnt", 0)
            item.add_video_info("collection_cnt", 0)
            item.add_video_info("share_cnt", 0)
            item.add_video_info("comment_cnt", 0)
            item.add_video_info("video_url", video_obj["video_url_list"][0]['video_url'])
            item.add_video_info("out_video_id", int(video_obj["channel_content_id"]))
            item.add_video_info("duration", video_obj["video_url_list"][0]['video_duration'])
            item.add_video_info("platform", self.platform)
            item.add_video_info("strategy", self.mode)
            item.add_video_info("session", f"{self.platform}-{int(time.time())}")
            item.add_video_info("user_id", our_user["uid"])
            item.add_video_info("user_name", our_user["nick_name"])
            mq_obj = item.produce_item()
            pipeline = PiaoQuanPipeline(
                platform=self.platform,
                mode=self.mode,
                rule_dict=self.rule_dict,
                env=self.env,
                item=mq_obj,
                trace_id=trace_id,
            )
            if pipeline.process_item():
                # If the title contains a flagged keyword, have GPT rewrite
                # it and record original/new titles in the Feishu sheet.
                title_list = self.title_rule.split(",")
                title = video_obj["title"]
                contains_keyword = any(keyword in title for keyword in title_list)
                if contains_keyword:
                    new_title = GPT4oMini.get_ai_mini_title(title)
                    if new_title:
                        item.add_video_info("video_title", new_title)
                        current_time = datetime.now()
                        formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
                        values = [
                            [
                                video_obj["video_url_list"][0]['video_url'],
                                video_obj["image_url_list"][0]['image_url'],
                                title,
                                new_title,
                                formatted_time,
                            ]
                        ]
                        FeishuUtils.insert_columns("U5dXsSlPOhiNNCtEfgqcm1iYnpf", "v8S6nL", "ROWS", 1, 2)
                        time.sleep(0.5)
                        FeishuUtils.update_values("U5dXsSlPOhiNNCtEfgqcm1iYnpf", "v8S6nL", "A2:Z2", values)
                self.download_cnt += 1
                self.mq.send_msg(mq_obj)
                self.aliyun_log.logging(
                    code="2009",
                    message=f"成功发送视频到etl",
                    data={"video_obj": video_obj},
                )
                # Remember the video id so it is not crawled again.
                self.redis_ops.save_video_id(video_obj['channel_content_id'])
                if self.download_cnt >= self.rule_dict.get("videos_cnt", {}).get("min", 300):
                    self.aliyun_log.logging(
                        code="2010",
                        message=f"今日已经达到最大量",
                        data=self.download_cnt,
                    )
                    self.LocalLog.info("视频数量已达到预期")
                    # Quota reached: stop the outer crawl loop.
                    self.limit_flag = False
        except Exception as e:
            tb_info = traceback.format_exc()
            self.aliyun_log.logging(
                code="1005",
                message=f"处理视频对象时发生异常,错误信息: {str(e)}",
                data={"video_obj": video_obj},
            )
            self.LocalLog.error(f"处理视频对象时发生异常: {e}\n{tb_info}")

    def convert_number(self, s):
        """Parse a count that may be an int or a string, including the
        Chinese '万' (ten-thousand) suffix. Returns 0 on unparseable input."""
        if isinstance(s, int):
            return s
        elif isinstance(s, str):
            if '万' in s:
                try:
                    num = float(s.strip('万')) * 10000
                    # Integral results as int, fractional as float.
                    return int(num) if num.is_integer() else num
                except ValueError:
                    self.LocalLog.warning(f"无法将 '{s}' 转换为有效的数字。")
                    return 0
            else:
                try:
                    return int(s)
                except ValueError:
                    try:
                        return float(s)
                    except ValueError:
                        self.LocalLog.warning(f"'{s}' 不是有效的数字格式。")
                        return 0
        else:
            self.LocalLog.warning(f"不支持的类型: {type(s).__name__}")
            return 0

    def run(self):
        """Entry point: run the author crawl until a stop condition fires."""
        self.LocalLog.info("开始执行中青看点用户视频抓取...")
        self.control_request_author()


def is_less_than_30_minutes():
    """Return True when less than 30 minutes remain before midnight."""
    now = datetime.now()
    tomorrow = now.date() + timedelta(days=1)
    midnight = datetime.combine(tomorrow, datetime.min.time())
    time_left = midnight - now
    return time_left.total_seconds() < 30 * 60


if __name__ == '__main__':
    ZhongQingKanDianAuthor(
        platform="zhongqingkandian",
        mode="author",
        rule_dict={'videos_cnt': {'min': 500, 'max': 0}, 'duration': {'min': 30, 'max': 1200}},
        user_list=[{"uid": 81525568, "link": "中青看点推荐", "nick_name": "芸芸众生"}],
    ).run()