
add abtest: 139

liqian 2 years ago
parent
commit
2f853f446e
3 changed files with 308 additions and 0 deletions
  1. config.py (+22, -0)
  2. region_rule_rank_h.py (+23, -0)
  3. rule_rank_day_by_30day.py (+263, -0)

+ 22 - 0
config.py

@@ -164,6 +164,21 @@ class BaseConfig(object):
         ],
     }
 
+    # Daily update of the past 30 days' data: loghubods.video_data_30days_dataset_total_apptype
+    PROJECT_30DAY_APP_TYPE = 'loghubods'
+    TABLE_30DAY_APP_TYPE = 'video_data_30days_dataset_total_apptype'
+
+    # Rule parameters for the daily update over the past 30 days' data
+    RULE_PARAMS_30DAY_APP_TYPE = {
+        'rule_params': {
+            'rule1': {'return_count': 100, 'platform_return_rate': 0.001, 'view_type': 'preview'},
+        },
+        'data_params': DATA_PARAMS,
+        'params_list': [
+            {'data': 'data1', 'rule': 'rule1'},
+        ]
+    }
+
     # Hourly update of the past 24h data: loghubods.video_data_each_hour_dataset_24h_total_apptype
     PROJECT_24H_APP_TYPE = 'loghubods'
     TABLE_24H_APP_TYPE = 'video_data_each_hour_dataset_24h_total_apptype'
@@ -230,6 +245,8 @@ class BaseConfig(object):
             #           'region_24h_rule_key': 'rule3', '24h_rule_key': 'rule2'},
             'rule7': {'view_type': 'video-show-region', 'platform_return_rate': 0.001,
                       'region_24h_rule_key': 'rule4', '24h_rule_key': 'rule4', 'merge_func': 2},
+            'rule9': {'view_type': 'video-show-region', 'platform_return_rate': 0.001,
+                      'region_24h_rule_key': 'rule2', '24h_rule_key': 'rule3', '30day_rule_key': 'rule1'}
         },
         'data_params': DATA_PARAMS,
         'params_list': [
@@ -239,6 +256,7 @@ class BaseConfig(object):
             {'data': 'data3', 'rule': 'rule7'},
             {'data': 'data4', 'rule': 'rule7'},
             {'data': 'data6', 'rule': 'rule7'},
+            {'data': 'data1', 'rule': 'rule9'},
         ],
     }
 
@@ -309,6 +327,10 @@ class BaseConfig(object):
     # Redis key for the hourly list of videos whose status fails recommendation requirements; full format: com.weiqu.video.filter.apptype.h.item.24h.{appType}.{data_key}.{rule_key}
     H_VIDEO_FILER_24H = 'com.weiqu.video.filter.apptype.h.item.24h.'
 
+    # Redis key prefix for storing the mini-program daily update results over the past 30 days' data;
+    # full format: recall:item:score:30day:{data_key}:{rule_key}:{date}
+    RECALL_KEY_NAME_PREFIX_30DAY = 'recall:item:score:30day:'
+
     # Redis key prefix for storing the mini-program region-grouped hourly update results;
     # full format: recall:item:score:region:h:{region}:{data_key}:{rule_key}:{date}:{h}
     RECALL_KEY_NAME_PREFIX_REGION_BY_H = 'recall:item:score:region:h:'
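
For reference, a minimal sketch of how a concrete key under the new RECALL_KEY_NAME_PREFIX_30DAY is composed. The data_key 'data1' and rule_key 'rule1' come from RULE_PARAMS_30DAY_APP_TYPE above; the date is an arbitrary example:

    from datetime import datetime

    RECALL_KEY_NAME_PREFIX_30DAY = 'recall:item:score:30day:'

    data_key, rule_key = 'data1', 'rule1'  # values from RULE_PARAMS_30DAY_APP_TYPE
    dt = datetime(2023, 1, 15).strftime('%Y%m%d')  # example date

    key_name = f"{RECALL_KEY_NAME_PREFIX_30DAY}{data_key}:{rule_key}:{dt}"
    print(key_name)  # -> recall:item:score:30day:data1:rule1:20230115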

+ 23 - 0
region_rule_rank_h.py

@@ -92,6 +92,22 @@ def get_rov_redis_key(now_date):
     return key_name
 
 
+def get_day_30day_videos(now_date, data_key, rule_key):
+    """获取天级更新相对30天的视频id"""
+    redis_helper = RedisHelper()
+    day_30day_recall_key_prefix = config_.RECALL_KEY_NAME_PREFIX_30DAY
+    now_dt = datetime.datetime.strftime(now_date, '%Y%m%d')
+    day_30day_recall_key_name = f"{day_30day_recall_key_prefix}{data_key}:{rule_key}:{now_dt}"
+    if not redis_helper.key_exists(key_name=day_30day_recall_key_name):
+        redis_dt = datetime.datetime.strftime((now_date - datetime.timedelta(days=1)), '%Y%m%d')
+        day_30day_recall_key_name = f"{day_30day_recall_key_prefix}{data_key}:{rule_key}:{redis_dt}"
+    data = redis_helper.get_all_data_from_zset(key_name=day_30day_recall_key_name, with_scores=True)
+    if data is None:
+        return None
+    video_ids = [int(video_id) for video_id, _ in data]
+    return video_ids
+
+
 def get_feature_data(project, table, now_date):
     """获取特征数据"""
     dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
@@ -175,6 +191,13 @@ def video_rank(df, now_date, now_h, rule_key, param, region, data_key, rule_rank
 
     # Write to the corresponding redis
     h_video_ids = []
+    by_30day_rule_key = param.get('30day_rule_key', None)
+    if by_30day_rule_key is not None:
+        # Dedupe against the daily 30-day recall list; use a separate name so the
+        # h_video_ids accumulator initialized above is not clobbered
+        day_30day_video_ids = get_day_30day_videos(now_date=now_date, data_key=data_key, rule_key=by_30day_rule_key)
+        if day_30day_video_ids is not None:
+            filtered_videos = [video_id for video_id in filtered_videos if int(video_id) not in day_30day_video_ids]
+
     h_recall_result = {}
     for video_id in filtered_videos:
         score = h_recall_df[h_recall_df['videoid'] == video_id]['score']

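The hunk above wires the new 30-day list into the hourly region ranking as an exclusion set. A self-contained sketch of that dedup step on toy data (the ids are invented; get_day_30day_videos and the redis layer are replaced by a plain list):

    # Stand-in for the list returned by get_day_30day_videos()
    day_30day_video_ids = [101, 103]

    # Hourly candidates that survived filter_video_status()
    filtered_videos = ['101', '102', '103', '104']

    # Same comprehension as in video_rank(): drop ids already present
    # in the daily 30-day recall list
    filtered_videos = [v for v in filtered_videos if int(v) not in day_30day_video_ids]
    assert filtered_videos == ['102', '104']
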
+ 263 - 0
rule_rank_day_by_30day.py

@@ -0,0 +1,263 @@
+import pandas as pd
+import math
+from functools import reduce
+from odps import ODPS
+from threading import Timer
+from datetime import datetime, timedelta
+from get_data import get_data_from_odps
+from db_helper import RedisHelper
+from utils import filter_video_status, check_table_partition_exits
+from config import set_config
+from log import Log
+
+config_, _ = set_config()
+log_ = Log()
+
+features = [
+    'apptype',
+    'videoid',
+    'preview人数',  # number of users with pre-exposure in the past 24h
+    'view人数',  # number of users with exposure in the past 24h
+    'play人数',  # number of users who played in the past 24h
+    'share人数',  # number of users who shared in the past 24h
+    '回流人数',  # number of users who shared in the past 24h and returned in the past 24h
+    'preview次数',  # number of pre-exposures in the past 24h
+    'view次数',  # number of exposures in the past 24h
+    'play次数',  # number of plays in the past 24h
+    'share次数',  # number of shares in the past 24h
+    'platform_return',
+    'platform_preview',
+    'platform_preview_total',
+    'platform_show',
+    'platform_show_total',
+    'platform_view',
+    'platform_view_total',
+]
+
+
+def data_check(project, table, now_date):
+    """检查数据是否准备好"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+
+    try:
+        dt = datetime.strftime(now_date, '%Y%m%d')
+        check_res = check_table_partition_exits(date=dt, project=project, table=table)
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {dt}'
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            data_count = 0
+    except Exception as e:
+        data_count = 0
+    return data_count
+
+
+def get_feature_data(now_date, project, table):
+    """获取特征数据"""
+    dt = datetime.strftime(now_date, '%Y%m%d')
+    records = get_data_from_odps(date=dt, project=project, table=table)
+    feature_data = []
+    for record in records:
+        item = {}
+        for feature_name in features:
+            item[feature_name] = record[feature_name]
+        feature_data.append(item)
+    feature_df = pd.DataFrame(feature_data)
+    return feature_df
+
+
+def cal_score(df, param):
+    # Score formula: score = share次数 / (view + 1000) + 0.01 * return / (share次数 + 100)
+    df = df.fillna(0)
+    if param.get('view_type', None) == 'video-show':
+        df['share_rate'] = df['share次数'] / (df['platform_show'] + 1000)
+    elif param.get('view_type', None) == 'preview':
+        df['share_rate'] = df['share次数'] / (df['preview人数'] + 1000)
+    else:
+        df['share_rate'] = df['share次数'] / (df['view人数'] + 1000)
+    df['back_rate'] = df['回流人数'] / (df['share次数'] + 100)
+    df['score'] = df['share_rate'] + 0.01 * df['back_rate']
+    df['platform_return_rate'] = df['platform_return'] / df['回流人数']
+    df = df.sort_values(by=['score'], ascending=False)
+    return df
+
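To make the formula concrete, a worked example with invented counts (no view_type given, so the view人数 branch applies):

    import pandas as pd

    # One toy row; column names match the features list above
    df = pd.DataFrame([{'share次数': 50, 'view人数': 9000, '回流人数': 30, 'platform_return': 12}])

    df['share_rate'] = df['share次数'] / (df['view人数'] + 1000)         # 50 / 10000 = 0.005
    df['back_rate'] = df['回流人数'] / (df['share次数'] + 100)           # 30 / 150   = 0.2
    df['score'] = df['share_rate'] + 0.01 * df['back_rate']             # 0.005 + 0.002 = 0.007
    df['platform_return_rate'] = df['platform_return'] / df['回流人数']  # 12 / 30 = 0.4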
+
+def video_rank_h(df, now_date, rule_key, param, data_key):
+    """
+    Select videos that meet the conditions for entering the recall source, to be merged with the daily-updated rov model result list
+    :param df:
+    :param now_date:
+    :param rule_key: rule key for daily-level data inclusion
+    :param param: parameters of the daily-level data inclusion rule
+    :param data_key: identifier of the data set used
+    :return:
+    """
+    redis_helper = RedisHelper()
+    log_.info(f"videos_count = {len(df)}")
+
+    # When a videoid is duplicated, keep the row with the higher score
+    df = df.sort_values(by=['score'], ascending=False)
+    df = df.drop_duplicates(subset=['videoid'], keep='first')
+    df['videoid'] = df['videoid'].astype(int)
+
+    # Select videos that meet the recall-source conditions
+    return_count = param.get('return_count')
+    if return_count:
+        day_recall_df = df[df['回流人数'] > return_count]
+    else:
+        day_recall_df = df
+    platform_return_rate = param.get('platform_return_rate', 0)
+    day_recall_df = day_recall_df[day_recall_df['platform_return_rate'] > platform_return_rate]
+    day_recall_videos = day_recall_df['videoid'].to_list()
+    log_.info(f'day_by30day_recall videos count = {len(day_recall_videos)}')
+
+    # Filter by video status
+    filtered_videos = filter_video_status(day_recall_videos)
+    log_.info('filtered_videos count = {}'.format(len(filtered_videos)))
+
+    # Write to the corresponding redis
+    now_dt = datetime.strftime(now_date, '%Y%m%d')
+    day_video_ids = []
+    day_recall_result = {}
+    for video_id in filtered_videos:
+        score = day_recall_df[day_recall_df['videoid'] == video_id]['score']
+        day_recall_result[int(video_id)] = float(score)
+        day_video_ids.append(int(video_id))
+
+    day_30day_recall_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_30DAY}{data_key}:{rule_key}:{now_dt}"
+
+    if len(day_recall_result) > 0:
+        log_.info(f"count = {len(day_recall_result)}, key = {day_30day_recall_key_name}")
+        redis_helper.add_data_with_zset(key_name=day_30day_recall_key_name, data=day_recall_result,
+                                        expire_time=2 * 24 * 3600)
+
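With rule1's values from config.py (return_count=100, platform_return_rate=0.001), the inclusion rule above reduces to two row filters; a toy illustration with invented rows:

    import pandas as pd

    df = pd.DataFrame({'videoid': [1, 2, 3],
                       '回流人数': [150, 80, 200],
                       'platform_return_rate': [0.002, 0.05, 0.0005]})

    return_count, platform_return_rate = 100, 0.001  # rule1 in config.py
    day_recall_df = df[(df['回流人数'] > return_count)
                       & (df['platform_return_rate'] > platform_return_rate)]
    # Only videoid 1 passes: 2 fails the return count, 3 fails the return rate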
+
+def merge_df(df_left, df_right):
+    """
+    Merge dfs on videoid and sum the corresponding features
+    :param df_left:
+    :param df_right:
+    :return:
+    """
+    df_merged = pd.merge(df_left, df_right, on=['videoid'], how='outer', suffixes=['_x', '_y'])
+    df_merged.fillna(0, inplace=True)
+    feature_list = ['videoid']
+    for feature in features:
+        if feature in ['apptype', 'videoid']:
+            continue
+        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
+        feature_list.append(feature)
+    return df_merged[feature_list]
+
+
+def merge_df_with_score(df_left, df_right):
+    """
+    Merge dfs on videoid; sum platform return count, return count, and score respectively
+    :param df_left:
+    :param df_right:
+    :return:
+    """
+    df_merged = pd.merge(df_left, df_right, on=['videoid'], how='outer', suffixes=['_x', '_y'])
+    df_merged.fillna(0, inplace=True)
+    feature_list = ['videoid', '回流人数', 'platform_return', 'score']
+    for feature in feature_list[1:]:
+        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
+    return df_merged[feature_list]
+
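A quick sketch of what merge_df does on two toy frames (values invented): the outer join keeps every videoid, and fillna(0) makes the per-feature sum treat a missing side as 0. merge_df_with_score works the same way, restricted to the 回流人数, platform_return, and score columns:

    import pandas as pd

    left = pd.DataFrame({'videoid': [1, 2], 'share次数': [10, 20]})
    right = pd.DataFrame({'videoid': [2, 3], 'share次数': [5, 7]})

    merged = pd.merge(left, right, on=['videoid'], how='outer', suffixes=['_x', '_y'])
    merged.fillna(0, inplace=True)
    merged['share次数'] = merged['share次数_x'] + merged['share次数_y']
    # videoid 1 -> 10, videoid 2 -> 25, videoid 3 -> 7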
+
+def rank(now_date, rule_params, project, table):
+    # Fetch feature data
+    feature_df = get_feature_data(now_date=now_date, project=project, table=table)
+    feature_df['apptype'] = feature_df['apptype'].astype(int)
+    # rank
+    data_params_item = rule_params.get('data_params')
+    rule_params_item = rule_params.get('rule_params')
+
+    for param in rule_params.get('params_list'):
+        score_df_list = []
+        data_key = param.get('data')
+        data_param = data_params_item.get(data_key)
+        log_.info(f"data_key = {data_key}, data_param = {data_param}")
+        rule_key = param.get('rule')
+        rule_param = rule_params_item.get(rule_key)
+        log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
+        merge_func = rule_param.get('merge_func', 1)
+
+        if merge_func == 2:
+            for apptype, weight in data_param.items():
+                df = feature_df[feature_df['apptype'] == apptype]
+                # Compute score
+                score_df = cal_score(df=df, param=rule_param)
+                score_df['score'] = score_df['score'] * weight
+                score_df_list.append(score_df)
+            # Merge scores
+            df_merged = reduce(merge_df_with_score, score_df_list)
+            # Update the platform return rate
+            df_merged['platform_return_rate'] = df_merged['platform_return'] / df_merged['回流人数']
+            video_rank_h(df=df_merged, now_date=now_date, rule_key=rule_key, param=rule_param, data_key=data_key)
+
+        else:
+            df_list = [feature_df[feature_df['apptype'] == apptype] for apptype, _ in data_param.items()]
+            df_merged = reduce(merge_df, df_list)
+            score_df = cal_score(df=df_merged, param=rule_param)
+            video_rank_h(df=score_df, now_date=now_date, rule_key=rule_key, param=rule_param, data_key=data_key)
+
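The two branches above differ in where the per-apptype weights from DATA_PARAMS apply; a condensed restatement as a sketch (feature_df, rule_param, and data_param are assumed in scope, as inside rank()):

    from functools import reduce

    # merge_func == 2: score each apptype slice first, scale by its weight,
    # then sum scores across slices with merge_df_with_score
    score_df_list = []
    for apptype, weight in data_param.items():
        score_df = cal_score(df=feature_df[feature_df['apptype'] == apptype], param=rule_param)
        score_df['score'] = score_df['score'] * weight
        score_df_list.append(score_df)
    weighted = reduce(merge_df_with_score, score_df_list)

    # default path: sum raw features across slices with merge_df, then score
    # once; the weights are ignored on this path
    pooled = reduce(merge_df, [feature_df[feature_df['apptype'] == a] for a in data_param])
    scored = cal_score(df=pooled, param=rule_param)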
+
+def rank_bottom(now_date, rule_params):
+    """未按时更新数据,用前一天数据作为当前的数据"""
+    redis_helper = RedisHelper()
+    redis_dt = datetime.strftime(now_date - timedelta(days=1), '%Y%m%d')
+    key_prefix_list = [config_.RECALL_KEY_NAME_PREFIX_30DAY]
+
+    for param in rule_params.get('params_list'):
+        data_key = param.get('data')
+        rule_key = param.get('rule')
+        log_.info(f"data_key = {data_key}, rule_key = {rule_key}")
+        for key_prefix in key_prefix_list:
+            key_name = f"{key_prefix}{data_key}:{rule_key}:{redis_dt}"
+            initial_data = redis_helper.get_all_data_from_zset(key_name=key_name, with_scores=True)
+            if initial_data is None:
+                initial_data = []
+            final_data = dict()
+            for video_id, score in initial_data:
+                final_data[video_id] = score
+            # Store into the corresponding redis
+            final_key_name = \
+                f"{key_prefix}{data_key}:{rule_key}:{datetime.strftime(now_date, '%Y%m%d')}"
+            if len(final_data) > 0:
+                redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=2 * 24 * 3600)
+
+
+def timer_check():
+    project = config_.PROJECT_30DAY_APP_TYPE
+    table = config_.TABLE_30DAY_APP_TYPE
+    rule_params = config_.RULE_PARAMS_30DAY_APP_TYPE
+    now_date = datetime.today()
+    log_.info(f"now_date: {datetime.strftime(now_date, '%Y%m%d%H')}")
+    now_h = datetime.now().hour
+    # Check whether today's daily-update data is ready
+    data_count = data_check(project=project, table=table, now_date=now_date)
+    if data_count > 0:
+        log_.info(f'day_by30day_data_count = {data_count}')
+        # Data is ready; run the update
+        rank(now_date=now_date, rule_params=rule_params, project=project, table=table)
+    elif now_h > 22:
+        log_.info('day_by30day_recall data is None!')
+        rank_bottom(now_date=now_date, rule_params=rule_params)
+    else:
+        # Data not ready; check again in 5 minutes
+        Timer(5 * 60, timer_check).start()
+
+
+if __name__ == '__main__':
+    timer_check()