import json
import datetime
import traceback

from pymysql.cursors import DictCursor
from tqdm import tqdm

from applications import bot, aiditApi
from applications.const import SingleVideoPoolPublishTaskConst
from applications.db import DatabaseConnector
from config import long_articles_config, apolloConfig

config = apolloConfig()
const = SingleVideoPoolPublishTaskConst()
# Per-platform publish settings (daily quota, nick name, generate plan id),
# loaded once at import time from Apollo config.
video_pool_config = json.loads(config.getConfigValue(key="video_pool_publish_config"))


class PublishSingleVideoPoolVideos:
    """
    Publish videos waiting in the single-video transform queue.

    For each supported platform, fetch up to the configured daily quota of
    pending tasks, create a video crawler plan via aiditApi, bind that plan
    to the platform's generate plan, and mark the tasks as transformed.
    Failures and empty queues are reported through the bot channel.
    """

    def __init__(self):
        # Connect once; the connector is reused by all queries in deal().
        self.db_client = DatabaseConnector(db_config=long_articles_config)
        self.db_client.connect()

    def get_task_list(self, platform: str) -> list[dict]:
        """
        Fetch pending transform tasks for *platform*, best-scored first.

        :param platform: platform key into ``video_pool_config``
        :return: rows with ``id``, ``content_trace_id`` and ``pq_vid``,
                 at most ``process_num_each_day`` of them
        """
        daily_limit = video_pool_config[platform]['process_num_each_day']
        # NOTE(review): values are f-string-interpolated into the SQL.
        # ``platform`` only ever comes from the hard-coded list in deal()
        # and the other two are ints, so this is safe today — but consider
        # switching to parameterized fetch if DatabaseConnector supports it.
        fetch_query = f"""
            select id, content_trace_id, pq_vid
            from single_video_transform_queue
            where status = {const.TRANSFORM_INIT_STATUS} and platform = '{platform}'
            order by score desc
            limit {daily_limit};
        """
        fetch_response = self.db_client.fetch(query=fetch_query, cursor_type=DictCursor)
        return fetch_response

    def update_tasks_status(self,
                            task_id_tuple: tuple,
                            ori_status: int,
                            new_status: int) -> int:
        """
        Transition tasks from *ori_status* to *new_status*.

        The status filter makes the update idempotent: rows already moved
        past *ori_status* by a concurrent run are left untouched.

        :return: number of rows actually updated
        """
        update_query = """
            update single_video_transform_queue
            set status = %s
            where id in %s and status = %s;
        """
        affected_rows = self.db_client.save(
            query=update_query,
            params=(new_status, task_id_tuple, ori_status)
        )
        return affected_rows

    def _create_crawler_plan(self, platform: str, vid_list: list) -> dict:
        """Create a single-video crawler plan for *vid_list*; returns the raw API response."""
        plan_name = (
            f"{video_pool_config[platform]['nick_name']}-"
            f"{datetime.datetime.today().strftime('%Y-%m-%d')}-"
            f"视频数量: {len(vid_list)}"
        )
        return aiditApi.auto_create_single_video_crawler_task(
            plan_name=plan_name,
            plan_tag="单视频供给冷启动",
            video_id_list=vid_list,
        )

    def _bind_to_generate_plan(self, platform: str, crawler_plan_id, crawler_plan_name) -> None:
        """Attach the freshly created crawler plan to the platform's generate plan."""
        crawler_task_list = [
            {
                "contentType": 1,
                "inputSourceModal": 4,
                "inputSourceChannel": 10,
                "inputSourceType": 2,
                "inputSourceValue": crawler_plan_id,
                "inputSourceSubType": None,
                "fieldName": None,
                "inputSourceLabel": "原始帖子-视频-票圈小程序-内容添加计划-{}".format(crawler_plan_name),
            }
        ]
        generate_plan_id = video_pool_config[platform]['generate_plan_id']
        aiditApi.bind_crawler_task_to_generate_task(
            crawler_task_list=crawler_task_list,
            generate_task_id=generate_plan_id,
        )

    def deal(self):
        """
        entrance of this class
        """
        platform_list = ["sph", "gzh", "toutiao", "hksp", "sohu"]
        for platform in tqdm(platform_list, desc='process each platform'):
            task_list = self.get_task_list(platform)
            task_id_tuple = tuple(task['id'] for task in task_list)
            vid_list = [task['pq_vid'] for task in task_list]
            if not vid_list:
                # Nothing queued for this platform — alert so supply is watched.
                bot(
                    title='视频内容池发布任务',
                    detail={
                        'platform': platform,
                        'date': datetime.datetime.today().strftime('%Y-%m-%d'),
                        'msg': '该平台无待发布视频,请关注供给的抓取'
                    },
                    mention=False
                )
                continue
            try:
                # create video crawler plan
                crawler_plan_response = self._create_crawler_plan(platform, vid_list)
                crawler_plan_id = crawler_plan_response["data"]["id"]
                crawler_plan_name = crawler_plan_response["data"]["name"]

                # bind crawler plan to generate plan
                self._bind_to_generate_plan(platform, crawler_plan_id, crawler_plan_name)

                # update status only after both API calls succeeded
                self.update_tasks_status(
                    task_id_tuple=task_id_tuple,
                    ori_status=const.TRANSFORM_INIT_STATUS,
                    new_status=const.TRANSFORM_SUCCESS_STATUS
                )
            except Exception as e:
                # Boundary handler: report the failure and move on to the
                # next platform rather than aborting the whole run.
                bot(
                    title='视频内容池发布任务',
                    detail={
                        'platform': platform,
                        'date': datetime.datetime.today().strftime('%Y-%m-%d'),
                        'msg': '发布视频内容池失败,原因:{}'.format(str(e)),
                        'detail': traceback.format_exc(),
                    },
                    mention=False
                )