import time
import traceback
import json
import uuid
from my_config import set_config
from my_utils import request_post, filter_video_status, send_msg_to_feishu, filter_video_status_app, \
    filter_political_videos
from log import Log
from db_helper import RedisHelper
from odps import ODPS

config_, _ = set_config()
log_ = Log()


def get_videos_from_flow_pool(app_type, size=1000):
    """Fetch videos from the supply flow pool, looping until the server stops returning data.

    :param app_type: product identifier, type-int
    :param size: number of videos fetched per request, type-int, default 1000
    :return: videos, type-list, [{'videoId': 1111, 'flowPool': ''}, ...]
    """
    # Batch marker derived from the first-fetch timestamp; the server uses it
    # to page through the pool across successive requests.
    batch_flag = int(time.time()) * 1000 + 333
    log_.info(f"batch_flag: {batch_flag}")
    # isSupply=1: always query the supply flow pool (not a caller-facing option).
    request_data = {'appType': app_type, 'batchFlag': batch_flag, 'size': size, 'isSupply': 1}
    videos = []
    retry = 0
    while True:
        result = request_post(request_url=config_.GET_VIDEOS_FROM_POOL_URL, request_data=request_data)
        if result is None:
            # Transport-level failure: allow up to 3 failed attempts overall.
            if retry > 2:
                break
            retry += 1
            continue
        if result['code'] != 0:
            log_.info('supply batch_flag: {}, 获取流量池视频失败'.format(batch_flag))
            if retry > 2:
                break
            retry += 1
            continue
        if not result['data']:
            # Empty page: retried a few times to guard against transient gaps,
            # then treated as "pool exhausted".
            if retry > 2:
                break
            retry += 1
            continue
        videos.extend(result['data'])
    return videos


def update_remain_view_count(video_info_list):
    """Query each video's remaining distributable count in the flow pool and mirror it into Redis.

    Videos with a positive remaining count have the count cached locally in Redis
    (25 min TTL); videos with a zero count have their local record deleted.

    :param video_info_list: video info tuples, type-list, [(video_id, flow_pool), ...]
    :return: remain_videos, type-list, video ids that still have distribution quota
    """
    redis_helper = RedisHelper()
    if not video_info_list:
        return []
    remain_videos = []
    batch_size = 10
    # Ceil division: unlike len(...)//10 + 1 this never issues a trailing
    # request with an empty video list when the count is a multiple of 10.
    batch_count = (len(video_info_list) + batch_size - 1) // batch_size
    for i in range(batch_count):
        remain_st_time = time.time()
        videos = [{'videoId': info[0], 'flowPool': info[1]}
                  for info in video_info_list[i * batch_size:(i + 1) * batch_size]]
        request_data = {'videos': videos}
        result = request_post(request_url=config_.GET_REMAIN_VIEW_COUNT_URL,
                              request_data=request_data, timeout=(0.5, 3))
        log_.info(f"supply i = {i}, expend time = {(time.time()-remain_st_time)*1000}")
        if result is None:
            # Request failed; skip this batch (best-effort, matches error branch below).
            continue
        if result['code'] != 0:
            log_.error('supply 获取视频在流量池中的剩余可分发数失败')
            continue
        for item in result['data']:
            if item['distributeCount'] is None:
                continue
            distribute_count = int(item['distributeCount'])
            key_name = f"{config_.SUPPLY_LOCAL_DISTRIBUTE_COUNT_PREFIX}{item['videoId']}:{item['flowPool']}"
            if distribute_count > 0:
                remain_videos.append(item['videoId'])
                # Cache the remaining distribution count locally.
                redis_helper.set_data_to_redis(key_name=key_name, value=distribute_count,
                                               expire_time=25 * 60)
            else:
                # Quota exhausted: drop the local record.
                redis_helper.del_keys(key_name=key_name)
    log_.info(f"新增加不分发过滤前后整体数量: {len(video_info_list)}:{len(remain_videos)}")
    return remain_videos


def update_flow_pool(flow_pool_id_list):
    """Fetch distributable flow-pool videos, filter them, and upload the result set to Redis.

    :param flow_pool_id_list: flow pool ids participating in the supply set, type-list
    :return: video_info_list, type-list, [(video_id, flow_pool), ...] — the
        (video, pool) pairs that passed status filtering; empty list on failure
    """
    # All products read data under app_type 0.
    app_type = 0
    # Initialized up front so the final return can never hit an unbound local
    # when an exception occurs before assignment inside the try block.
    video_info_list = []
    try:
        # Pull the raw candidate set from the flow pool service.
        videos = get_videos_from_flow_pool(app_type=app_type)
        if len(videos) <= 0:
            log_.info('supply 流量池中无需分发的视频')
            return
        # Map video_id -> [{'flowPool': ..., 'level': ...}, ...].
        # The pool-id split used by recall happens here; videos in the pool are
        # not otherwise separated by id or level.
        video_ids = set()
        log_.info('supply 流量池中视频数:{}'.format(len(videos)))
        mapping = {}
        for video in videos:
            flow_pool_id = video['flowPoolId']
            if int(flow_pool_id) not in flow_pool_id_list:
                continue
            video_id = video['videoId']
            video_ids.add(video_id)
            item_info = {'flowPool': video['flowPool'], 'level': video['level']}
            mapping.setdefault(video_id, []).append(item_info)
        log_.info(f"supply 需更新流量池视频数: {len(video_ids)}")

        # Filter out videos whose status forbids distribution.
        filtered_videos = filter_video_status(list(video_ids))
        log_.info('filter videos status finished, filtered_videos nums={}'.format(len(filtered_videos)))
        if not filtered_videos:
            log_.info('supply 流量池中视频状态不符合分发')
            return

        # 1. Refresh distribution counts and drop videos with zero quota.
        # A set mirrors video_info_list for O(1) dedup instead of O(n) list scans.
        seen = set()
        for video_id in filtered_videos:
            for item in mapping.get(video_id):
                video_info = (video_id, item['flowPool'])
                if video_info not in seen:
                    seen.add(video_info)
                    video_info_list.append(video_info)
        log_.info(f"video_info_list count = {len(video_info_list)}")

        remain_videos = update_remain_view_count(video_info_list)
        if not remain_videos:
            log_.info('流量池中视频状态不符合分发')
            return

        # 2. Upload to Redis; set members are formatted "{video_id}-{flow_pool}".
        flow_pool_redis_data = set()
        for video_id in remain_videos:
            for item in mapping.get(video_id):
                flow_pool_redis_data.add('{}-{}'.format(video_id, item['flowPool']))

        redis_helper = RedisHelper()
        log_.info(f"videos_count: {len(flow_pool_redis_data)}")
        flow_pool_key_name = f"{config_.FLOWPOOL_KEY_NAME_PREFIX_SET_LEVEL_SUPPLY}{app_type}"
        # Delete the key first so stale members never survive the refresh.
        if redis_helper.key_exists(flow_pool_key_name):
            redis_helper.del_keys(flow_pool_key_name)
        if flow_pool_redis_data:
            redis_helper.add_data_with_set(key_name=flow_pool_key_name,
                                           values=flow_pool_redis_data,
                                           expire_time=24 * 3600)
        # Read back for a write-confirmation log line.
        result = redis_helper.get_data_from_set(flow_pool_key_name)
        size = len(result) if result else 0
        log_.info(f'写入成功key={flow_pool_key_name}:{size}')
        log_.info('supply data to redis finished!')
    except Exception as e:
        log_.error('supply 流量池更新失败, appType: {} exception: {}, traceback: {}'.format(
            app_type, e, traceback.format_exc()))
        send_msg_to_feishu(
            webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
            key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
            msg_text='rov-offline{} - supply 流量池更新失败, appType: {}, exception: {}'.format(
                config_.ENV_TEXT, app_type, e)
        )
    return video_info_list


if __name__ == '__main__':
    st_time = time.time()
    log_.info('flow pool predict start...')
    # Read the list of participating flow pool ids from the A/B-test config.
    redis_helper = RedisHelper()
    flow_pool_abtest_config = redis_helper.get_data_from_redis(key_name=config_.FLOWPOOL_ABTEST_KEY_NAME)
    if flow_pool_abtest_config is not None:
        flow_pool_abtest_config = json.loads(flow_pool_abtest_config)
    else:
        flow_pool_abtest_config = {}
    flow_pool_id_list = flow_pool_abtest_config.get('supply_flow_set_level', [])
    log_.info('predict start...')
    update_flow_pool(flow_pool_id_list=flow_pool_id_list)
    log_.info('predict end...')
    log_.info(f"expend time = {(time.time() - st_time) * 1000}ms")
    log_.info('flow pool predict end...')
    # python flowpool_data_update_with_level.py 测试环境必须手动执行python 才能有数据