丁云鹏 3 months ago
parent
commit
3af6b23d9e

+ 11 - 0
flowpool_data_update_with_level_task_v2.sh

@@ -0,0 +1,11 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+#    cd /data2/rov-offline && /root/anaconda3/bin/python /data2/rov-offline/pool_predict.py &
+#    cd /data2/rov-offline && /root/anaconda3/bin/python /data2/rov-offline/flowpool_data_update.py
+    cd /data2/rov-offline && /root/anaconda3/bin/python /data2/rov-offline/flowpool_data_update_with_level_v2.py
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+#    cd /data/rov-offline && /root/anaconda3/bin/python /data/rov-offline/pool_predict.py
+#    cd /data/rov-offline && /root/anaconda3/bin/python /data/rov-offline/flowpool_data_update.py
+    cd /data/rov-offline && /root/anaconda3/bin/python /data/rov-offline/flowpool_data_update_with_level_v2.py
+fi

+ 244 - 0
flowpool_data_update_with_level_v2.py

@@ -0,0 +1,244 @@
+import datetime
+import random
+import time
+import os
+import traceback
+import json
+
+from my_config import set_config
+from my_utils import request_post, filter_video_status, send_msg_to_feishu, filter_video_status_app, \
+    filter_political_videos
+from log import Log
+from db_helper import RedisHelper
+from odps import ODPS
+
+config_, _ = set_config()
+log_ = Log()
+
+
+def get_videos_from_flow_pool(app_type, size=1000):
+    """
+    Fetch videos from the flow pool in a loop until no more data is returned.
+    :param app_type: product identifier, type-int
+    :param size: number of videos fetched per request, type-int, default 1000
+    :return: videos  [{'videoId': 1111, 'flowPool': ''}, ...]
+    """
+    # Batch flag: use the timestamp of the first fetch as the marker
+    batch_flag = int(time.time())
+    request_data = {'appType': app_type, 'batchFlag': batch_flag, 'size': size}
+    videos = []
+    retry = 0
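+    # Keep requesting with the same batch_flag until the service returns no more data,
+    # retrying up to 3 times on failed or empty responses before giving up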
+    while True:
+        print(config_.GET_VIDEOS_FROM_POOL_URL)
+        result = request_post(request_url=config_.GET_VIDEOS_FROM_POOL_URL, request_data=request_data)
+        if result is None:
+            if retry > 2:
+                break
+            retry += 1
+            continue
+        if result['code'] != 0:
+            log_.info('batch_flag: {}, failed to fetch videos from the flow pool'.format(batch_flag))
+            if retry > 2:
+                break
+            retry += 1
+            continue
+        if not result['data']:
+            if retry > 2:
+                break
+            retry += 1
+            continue
+        videos.extend(result['data'])
+        # a successful batch resets the retry counter
+        retry = 0
+    return videos
+
+
+def update_remain_view_count(video_info_list):
+    """
+    Query the remaining distributable count of each video in the flow pool and store it in Redis.
+    :param video_info_list: video info (video id, flow pool flag), type-list, [(video_id, flow_pool), ...]
+    :return: None
+    """
+    redis_helper = RedisHelper()
+    if not video_info_list:
+        return
+
+    # Request 10 videos at a time
+    bu_fen_fa_cnt = 0  # count of videos left with no remaining distribution quota
+    for i in range(len(video_info_list)//10 + 1):
+        remain_st_time = time.time()
+        videos = [{'videoId': info[0], 'flowPool': info[1]} for info in video_info_list[i*10:(i+1)*10]]
+        if not videos:
+            # the last slice is empty when len(video_info_list) is a multiple of 10
+            continue
+        request_data = {'videos': videos}
+        result = request_post(request_url=config_.GET_REMAIN_VIEW_COUNT_URL,
+                              request_data=request_data, timeout=(0.5, 3))
+        log_.info(f"i = {i}, expend time = {(time.time()-remain_st_time)*1000}")
+        if result is None:
+            continue
+        if result['code'] != 0:
+            log_.error('failed to get the remaining distributable count of videos in the flow pool')
+            continue
+        for item in result['data']:
+            if item['distributeCount'] is None:
+                continue
+            distribute_count = int(item['distributeCount'])
+            if distribute_count > 0:
+                # Update the local record of the distributable count
+                key_name = f"{config_.LOCAL_DISTRIBUTE_COUNT_PREFIX}{item['videoId']}:{item['flowPool']}"
+                redis_helper.set_data_to_redis(key_name=key_name, value=distribute_count, expire_time=25 * 60)
+            else:
+                # Delete the local record
+                key_name = f"{config_.LOCAL_DISTRIBUTE_COUNT_PREFIX}{item['videoId']}:{item['flowPool']}"
+                redis_helper.del_keys(key_name=key_name)
+
+                bu_fen_fa_cnt = bu_fen_fa_cnt + 1
+    log_.info(f"新增加不分发过滤前后整体数量: {len(video_info_list)}:{str(bu_fen_fa_cnt)}")
+def get_flow_pool_recommend_config(flow_pool_id):
+    """获取流量池推荐分发配置"""
+    result = request_post(request_url=config_.GET_FLOW_POOL_RECOMMEND_CONFIG_URL)
+    if result is None:
+        return None
+    if result['code'] != 0:
+        return None
+    flow_pool_distribute_config = result['data'].get('flowPoolDistributeConfig')
+    if not flow_pool_distribute_config:
+        return None
+    # evaluate the serialized config once and reuse it
+    distribute_config = eval(flow_pool_distribute_config)
+    if int(distribute_config.get('flowPoolId')) == flow_pool_id:
+        return eval(distribute_config.get('distributeRate'))
+    return None
+
+
+def update_flow_pool(flow_pool_id_list):
+    """
+    Fetch the distributable videos of the flow pool and upload the result to Redis.
+    :param flow_pool_id_list: ids of the flow pools to update, type-list
+    :return: None
+    """
+    # All products fetch data with app_type 0
+    app_type = 0
+    # (video_id, flow_pool) tuples collected for the remaining-view-count update
+    video_info_list = []
+
+    try:
+        # Fetch data from the flow pool
+        videos = get_videos_from_flow_pool(app_type=app_type)
+        if len(videos) <= 0:
+            log_.info('no videos in the flow pool need distribution')
+            return
+        # Map each video_id to its flow_pool / level entries
+        video_ids = set()
+        log_.info('number of videos in the flow pool: {}'.format(len(videos)))
+        mapping = {}
+        for video in videos:
+            flow_pool_id = video['flowPoolId']  # The split by flow pool id used for recall is done here; videos in the flow pool themselves are not separated by id or by level.
+            if int(flow_pool_id) not in flow_pool_id_list:
+                continue
+            # print(f"flow_pool_id: {flow_pool_id}")
+            video_id = video['videoId']
+            video_ids.add(video_id)
+            item_info = {'flowPool': video['flowPool'], 'level': video['level']}
+            if video_id in mapping:
+                mapping[video_id].append(item_info)
+            else:
+                mapping[video_id] = [item_info]
+        log_.info(f"需更新流量池视频数: {len(video_ids)}")
+
+        # Filter videos by status
+        filtered_videos = filter_video_status(list(video_ids))
+        log_.info('filter videos status finished, filtered_videos nums={}'.format(len(filtered_videos)))
+
+        if not filtered_videos:
+            log_.info('no flow pool videos are eligible for distribution after the status filter')
+            return
+
+        # Data to upload to Redis
+        # quick-exposure flow pool videos
+        quick_flow_pool_redis_data = set()
+        # normal flow pool videos, stored by level
+        flow_pool_redis_data = set()
+
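+        # Build the Redis set members ("videoId-flowPool") and collect the (video_id, flow_pool)
+        # tuples used later for the remaining-view-count update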
+        for video_id in filtered_videos:
+            for item in mapping.get(video_id):
+                flow_pool = item['flowPool']
+                # Check whether the video belongs to the quick-exposure flow pool
+                value = '{}-{}'.format(video_id, flow_pool)
+                flow_pool_id = int(flow_pool.split('#')[0])  # flowPool format: flowPoolId#gradeId#level#lifecycleId
+                if flow_pool_id == config_.QUICK_FLOW_POOL_ID:
+                    quick_flow_pool_redis_data.add(value)
+                else:
+                    flow_pool_redis_data.add(value)
+
+                video_info = (video_id, flow_pool)
+                if video_info not in video_info_list:
+                    video_info_list.append(video_info)
+
+        # Write to Redis
+
+        # 1. Update the remaining distributable counts
+        log_.info(f"video_info_list count = {len(video_info_list)}")
+        update_remain_view_count(video_info_list)
+
+
+        # 2. Quick-exposure flow pool
+        redis_helper = RedisHelper()
+        quick_flow_pool_key_name = f"{config_.QUICK_FLOWPOOL_KEY_NAME_PREFIX_SET}{app_type}:{config_.QUICK_FLOW_POOL_ID}"
+        # Delete the key first if it already exists
+        if redis_helper.key_exists(quick_flow_pool_key_name):
+            redis_helper.del_keys(quick_flow_pool_key_name)
+        if quick_flow_pool_redis_data:
+            log_.info(f"quick_flow_pool_redis_data = {quick_flow_pool_redis_data}")
+            redis_helper.add_data_with_set(key_name=quick_flow_pool_key_name, values=quick_flow_pool_redis_data,
+                                           expire_time=24 * 3600)
+            # Store the quick flow pool distribute rate in Redis
+            distribute_rate_key_name = f"{config_.QUICK_FLOWPOOL_DISTRIBUTE_RATE_KEY_NAME_PREFIX}{config_.QUICK_FLOW_POOL_ID}"
+            distribute_rate = get_flow_pool_recommend_config(flow_pool_id=config_.QUICK_FLOW_POOL_ID)
+            if distribute_rate is not None:
+                redis_helper.set_data_to_redis(key_name=distribute_rate_key_name, value=distribute_rate,
+                                               expire_time=15 * 60)
+
+        # 3. Normal flow pool (stored by level)
+        log_.info(f"videos_count: {len(flow_pool_redis_data)}")
+        flow_pool_key_name = f"{config_.FLOWPOOL_KEY_NAME_PREFIX_SET_LEVEL}{app_type}"
+        # Delete the key first if it already exists
+        if redis_helper.key_exists(flow_pool_key_name):
+            redis_helper.del_keys(flow_pool_key_name)
+        # Write to Redis
+        if flow_pool_redis_data:
+            redis_helper.add_data_with_set(key_name=flow_pool_key_name, values=flow_pool_redis_data, expire_time=24 * 3600)
+            result = redis_helper.get_data_from_set(flow_pool_key_name)
+            if not result:
+                result = []
+            size = len(result)
+            log_.info(f'write to Redis succeeded, key={flow_pool_key_name}, size={size}')
+
+        log_.info('data to redis finished!')
+
+
+    except Exception as e:
+        log_.error('flow pool update failed, appType: {}, exception: {}, traceback: {}'.format(
+            app_type, e, traceback.format_exc()))
+        send_msg_to_feishu(
+            webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
+            key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
+            msg_text='rov-offline{} - flow pool update failed, appType: {}, exception: {}'.format(config_.ENV_TEXT, app_type, e)
+        )
+
+if __name__ == '__main__':
+    st_time = time.time()
+    # To avoid fetching incomplete data for the first app_type, wait 1 min
+    time.sleep(60)
+    log_.info('flow pool predict start...')
+    # Get the list of corresponding flow pool ids
+    redis_helper = RedisHelper()
+    flow_pool_abtest_config = redis_helper.get_data_from_redis(key_name=config_.FLOWPOOL_ABTEST_KEY_NAME)
+    if flow_pool_abtest_config is not None:
+        flow_pool_abtest_config = json.loads(flow_pool_abtest_config)
+    else:
+        flow_pool_abtest_config = {}
+    flow_pool_id_list = flow_pool_abtest_config.get('experimental_flow_set_level', [])
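+    # Only the flow pool ids listed under 'experimental_flow_set_level' in the AB-test config are updated by this v2 script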
+    log_.info('predict start...')
+    update_flow_pool(flow_pool_id_list=flow_pool_id_list)
+    log_.info('predict end...')
+    log_.info(f"expend time = {(time.time() - st_time) * 1000}ms")
+    log_.info('flow pool predict end...')
+
+# python flowpool_data_update_with_level_v2.py    In the test environment this script must be run manually with python for data to be generated