import datetime

from pymysql.cursors import DictCursor

from applications import aiditApi
from applications.db import DatabaseConnector
from config import long_articles_config

# Generate-plan ids on the aidit platform, keyed by source platform.
generate_plan_map = {
    "gzh": "20250321060236316993274",
    "sph": "20250321055917369191992",
    "toutiao": "20250321060107537529410",
    "hksp": "20250321060438155415100",
}

# Display names used when naming crawler plans.
platform_name_map = {
    "gzh": "公众号",  # WeChat Official Accounts
    "sph": "视频号",  # WeChat Channels
    "toutiao": "头条号",  # Toutiao
    "hksp": "好看视频",  # Haokan Video
}


class PublishSingleVideoPoolVideos:
    """Publish videos from the single-video pool: create a crawler plan per
    platform and bind it to that platform's generate plan."""

    def __init__(self):
        self.db_client = DatabaseConnector(db_config=long_articles_config)
        self.db_client.connect()

    def get_task_list(self, platform: str) -> list[dict]:
        """Fetch the highest-scored pending tasks for a platform."""
        match platform:
            case "sph":
                task_count = 218
            case "gzh":
                task_count = 201
            case "toutiao":
                task_count = 411
            case "hksp":
                task_count = 165
            case _:
                return []
        # platform comes from the closed set matched above, so interpolating
        # it directly is safe; task_count caps the batch size per platform
        fetch_query = f"""
            select id, content_trace_id, pq_vid
            from single_video_transform_queue
            where status = 0 and platform = '{platform}'
            order by score desc
            limit {task_count};
        """
        return self.db_client.fetch(query=fetch_query, cursor_type=DictCursor)

    def update_tasks_status(
        self, task_id_tuple: tuple, ori_status: int, new_status: int
    ) -> int:
        """Transition the given tasks from ori_status to new_status and
        return the number of affected rows."""
        update_query = """
            update single_video_transform_queue
            set status = %s
            where id in %s and status = %s;
        """
        return self.db_client.save(
            query=update_query,
            params=(new_status, task_id_tuple, ori_status),
        )

    def deal(self):
        """Entry point: for each platform, create a crawler plan from the
        pending videos, mark those tasks as published, and bind the crawler
        plan to the platform's generate plan."""
        platform_list = ["sph", "gzh", "toutiao", "hksp"]
        for platform in platform_list:
            task_list = self.get_task_list(platform)
            task_id_tuple = tuple(task["id"] for task in task_list)
            vid_list = [task["pq_vid"] for task in task_list]
            if not vid_list:
                continue

            # create a video crawler plan named after the platform, the
            # date, and the video count ("视频数量" = video count)
            plan_name = (
                f"{platform_name_map[platform]}"
                f"-{datetime.datetime.today().strftime('%Y-%m-%d')}"
                f"-视频数量: {len(vid_list)}"
            )
            crawler_plan_response = aiditApi.auto_create_single_video_crawler_task(
                plan_name=plan_name,
                plan_tag="单视频供给冷启动",  # single-video supply cold start
                video_id_list=vid_list,
            )
            crawler_plan_id = crawler_plan_response["data"]["id"]
            crawler_plan_name = crawler_plan_response["data"]["name"]

            # mark the fetched tasks as published (status 0 -> 1)
            self.update_tasks_status(
                task_id_tuple=task_id_tuple,
                ori_status=0,
                new_status=1,
            )

            # bind the crawler plan to this platform's generate plan
            crawler_task_list = [
                {
                    "contentType": 1,
                    "inputSourceModal": 4,
                    "inputSourceChannel": 10,
                    "inputSourceType": 2,
                    "inputSourceValue": crawler_plan_id,
                    "inputSourceSubType": None,
                    "fieldName": None,
                    # "original post - video - Piaoquan mini-program -
                    # content ingestion plan - <crawler plan name>"
                    "inputSourceLabel": "原始帖子-视频-票圈小程序-内容添加计划-{}".format(
                        crawler_plan_name
                    ),
                }
            ]
            generate_plan_id = generate_plan_map[platform]
            aiditApi.bind_crawler_task_to_generate_task(
                crawler_task_list=crawler_task_list,
                generate_task_id=generate_plan_id,
            )
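

# Usage sketch (an assumption, not part of the original module): how this
# job would typically be invoked, e.g. from a cron entry point. The actual
# scheduler wiring is not shown in this file.
if __name__ == "__main__":
    publish_task = PublishSingleVideoPoolVideos()
    publish_task.deal()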