Browse Source

Merge branch 'abtest-117-20220818' into pre-master

liqian 2 years ago
parent
commit
7728b6e481
6 changed files with 237 additions and 107 deletions
  1. check_video_limit_distribute.py (+31 −23)
  2. config.py (+23 −14)
  3. redis_data_monitor.py (+2 −2)
  4. region_rule_rank_h.py (+59 −17)
  5. region_rule_rank_h_by24h.py (+49 −9)
  6. rule_rank_h_by_24h.py (+73 −42)

+ 31 - 23
check_video_limit_distribute.py

@@ -115,29 +115,37 @@ def process_with_region(data_key, rule_key, region, stop_distribute_video_id_lis
     #     expire_time=2 * 3600
     # )
 
-    if rule_key == 'rule4':
-        key_prefix_list = [
-            config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_24H_H,  # 不区分地域相对24h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_24H_H,  # 不区分地域相对24h列表2
-            config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
-        ]
-    elif rule_key == 'rule5':
-        key_prefix_list = [
-            config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_48H_H,  # 不区分地域相对48h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_48H_H,  # 不区分地域相对48h列表2
-            config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
-        ]
-    else:
-        key_prefix_list = [
-            config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_24H_H,  # 不区分地域相对24h列表
-            config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
-        ]
+    key_prefix_list = [
+        config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
+        config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
+        config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_24H_H,  # 不区分地域相对24h列表
+        config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_24H_H,  # 不区分地域相对24h列表2
+        config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
+    ]
+
+    # if rule_key == 'rule4':
+    #     key_prefix_list = [
+    #         config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_24H_H,  # 不区分地域相对24h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_24H_H,  # 不区分地域相对24h列表2
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
+    #     ]
+    # elif rule_key == 'rule5':
+    #     key_prefix_list = [
+    #         config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_48H_H,  # 不区分地域相对48h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_48H_H,  # 不区分地域相对48h列表2
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
+    #     ]
+    # else:
+    #     key_prefix_list = [
+    #         config_.RECALL_KEY_NAME_PREFIX_REGION_BY_H,  # 地域分组小时级列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP1_REGION_24H_H,  # 地域分组相对24h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP2_REGION_24H_H,  # 不区分地域相对24h列表
+    #         config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_H,  # 大列表
+    #     ]
 
     for key_prefix in key_prefix_list:
         key_name = f"{key_prefix}{region}:{data_key}:{rule_key}:" \

+ 23 - 14
config.py

@@ -128,14 +128,14 @@ class BaseConfig(object):
 
     # ##### 区分appType数据
     DATA_PARAMS = {
-        'data1': [APP_TYPE['VLOG'], ],  # vlog
-        'data2': [APP_TYPE['VLOG'], APP_TYPE['LONG_VIDEO'], ],  # [vlog, 内容精选]
-        'data3': [APP_TYPE['VLOG'], APP_TYPE['LOVE_LIVE'], ],  # [vlog, 票圈视频]
-        'data4': [APP_TYPE['VLOG'], APP_TYPE['SHORT_VIDEO'], ],  # [vlog, 票圈短视频]
-        'data5': [APP_TYPE['VLOG'], APP_TYPE['ZUI_JING_QI']],  # [vlog, 最惊奇]
-        'data6': [APP_TYPE['VLOG'], APP_TYPE['LOVE_LIVE'], APP_TYPE['LONG_VIDEO'], APP_TYPE['SHORT_VIDEO']],
-        'data7': [APP_TYPE['VLOG'], APP_TYPE['LOVE_LIVE'], APP_TYPE['LONG_VIDEO'], APP_TYPE['SHORT_VIDEO'],
-                  APP_TYPE['APP']],
+        'data1': {APP_TYPE['VLOG']: 0},  # vlog
+        'data2': {APP_TYPE['VLOG']: 0, APP_TYPE['LONG_VIDEO']: 0},  # [vlog, 内容精选]
+        # 'data3': [APP_TYPE['VLOG'], APP_TYPE['LOVE_LIVE'], ],  # [vlog, 票圈视频]
+        # 'data4': [APP_TYPE['VLOG'], APP_TYPE['SHORT_VIDEO'], ],  # [vlog, 票圈短视频]
+        # 'data5': [APP_TYPE['VLOG'], APP_TYPE['ZUI_JING_QI']],  # [vlog, 最惊奇]
+        'data6': {APP_TYPE['VLOG']: 0.25, APP_TYPE['LOVE_LIVE']: 0.25, APP_TYPE['LONG_VIDEO']: 0.25, APP_TYPE['SHORT_VIDEO']: 0.25},
+        # 'data7': [APP_TYPE['VLOG'], APP_TYPE['LOVE_LIVE'], APP_TYPE['LONG_VIDEO'], APP_TYPE['SHORT_VIDEO'],
+        #           APP_TYPE['APP']],
     }
 
     # 小时级更新过去48h数据 loghubods.video_data_each_hour_dataset_48h_total_apptype
@@ -162,10 +162,12 @@ class BaseConfig(object):
     # 小时级更新过去24h数据规则参数
     RULE_PARAMS_24H_APP_TYPE = {
         'rule_params': {
-            'rule2': {'cal_score_func': 2, 'return_count': 40, 'platform_return_rate': 0.001,
-                      'view_type': 'preview'},
+            # 'rule2': {'cal_score_func': 2, 'return_count': 40, 'platform_return_rate': 0.001,
+            #           'view_type': 'preview'},
             'rule3': {'cal_score_func': 2, 'return_count': 100, 'platform_return_rate': 0.001,
                       'view_type': 'preview'},
+            'rule4': {'cal_score_func': 2, 'return_count': 100, 'platform_return_rate': 0.001,
+                      'view_type': 'preview', 'merge_func': 2},
         },
         'data_params': DATA_PARAMS,
         'params_list': [
@@ -177,6 +179,7 @@ class BaseConfig(object):
             # {'data': 'data4', 'rule': 'rule2'},
             # {'data': 'data7', 'rule': 'rule2'},
             # {'data': 'data6', 'rule': 'rule2'},
+            {'data': 'data6', 'rule': 'rule4'},
         ]
     }
 
@@ -191,6 +194,8 @@ class BaseConfig(object):
                       'platform_return_rate': 0.001},
             'rule3': {'view_type': 'preview', 'return_count': 21, 'score_rule': 0,
                       'platform_return_rate': 0.001},
+            'rule4': {'view_type': 'video-show', 'return_count': 21, 'score_rule': 0,
+                      'platform_return_rate': 0.001, 'merge_func': 2},
         },
         'data_params': DATA_PARAMS,
         'params_list': [
@@ -200,6 +205,7 @@ class BaseConfig(object):
             # {'data': 'data4', 'rule': 'rule2'},
             # {'data': 'data6', 'rule': 'rule2'},
             # {'data': 'data7', 'rule': 'rule3'},
+            {'data': 'data6', 'rule': 'rule4'},
         ]
     }
 
@@ -211,12 +217,14 @@ class BaseConfig(object):
     RULE_PARAMS_REGION_APP_TYPE = {
         'rule_params': {
             # 'rule2': {'view_type': 'video-show', 'platform_return_rate': 0.001, 'region_24h_rule_key': 'rule2'},
-            'rule3': {'view_type': 'video-show-region', 'platform_return_rate': 0.001,
-                      'region_24h_rule_key': 'rule2', '24h_rule_key': 'rule2'},
+            # 'rule3': {'view_type': 'video-show-region', 'platform_return_rate': 0.001,
+            #           'region_24h_rule_key': 'rule2', '24h_rule_key': 'rule2'},
             'rule4': {'view_type': 'video-show-region', 'platform_return_rate': 0.001,
                       'region_24h_rule_key': 'rule2', '24h_rule_key': 'rule3'},
-            'rule6': {'view_type': 'preview', 'platform_return_rate': 0.001,
-                      'region_24h_rule_key': 'rule3', '24h_rule_key': 'rule2'},
+            # 'rule6': {'view_type': 'preview', 'platform_return_rate': 0.001,
+            #           'region_24h_rule_key': 'rule3', '24h_rule_key': 'rule2'},
+            'rule7': {'view_type': 'preview', 'platform_return_rate': 0.001,
+                      'region_24h_rule_key': 'rule4', '24h_rule_key': 'rule4', 'merge_func': 2},
         },
         'data_params': DATA_PARAMS,
         'params_list': [
@@ -228,6 +236,7 @@ class BaseConfig(object):
             # {'data': 'data4', 'rule': 'rule3'},
             # {'data': 'data6', 'rule': 'rule3'},
             # {'data': 'data7', 'rule': 'rule6'},
+            {'data': 'data6', 'rule': 'rule7'},
         ],
     }
 

+ 2 - 2
redis_data_monitor.py

@@ -36,8 +36,8 @@ def region_data_monitor(now_date, now_h, rule_params, key_prefix_dict):
         data_key = param.get('data')
         rule_key = param.get('rule')
         for key_con, key_prefix in key_prefix_dict.items():
-            if key_con == '不区分地域相对24h筛选后剩余去重后数据' and rule_key != 'rule4':
-                continue
+            # if key_con == '不区分地域相对24h筛选后剩余去重后数据' and rule_key != 'rule4':
+            #     continue
             no_update_region_list = []
             for region in region_code_list:
                 region_key_name = f"{key_prefix}{region}:{data_key}:{rule_key}:{now_date}:{now_h}"

+ 59 - 17
region_rule_rank_h.py

@@ -267,14 +267,14 @@ def dup_to_redis(h_video_ids, now_date, now_h, rule_key, region_24h_rule_key, by
                                dup_key_name=h_24h_dup_key_name, region=region)
 
         # ##### 去重小程序相对24h 筛选后剩余数据 更新结果,并另存为redis中
-        if by_24h_rule_key == 'rule3':
-            other_h_24h_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{data_key}:" \
-                                   f"{by_24h_rule_key}:{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
-            other_h_24h_dup_key_name = \
-                f"{config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_24H_H}{region}:{data_key}:{rule_key}:" \
-                f"{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
-            h_video_ids = dup_data(h_video_ids=h_video_ids, initial_key_name=other_h_24h_key_name,
-                                   dup_key_name=other_h_24h_dup_key_name, region=region)
+        # if by_24h_rule_key in ['rule3', 'rule4']:
+        other_h_24h_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{data_key}:" \
+                               f"{by_24h_rule_key}:{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
+        other_h_24h_dup_key_name = \
+            f"{config_.RECALL_KEY_NAME_PREFIX_DUP3_REGION_24H_H}{region}:{data_key}:{rule_key}:" \
+            f"{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
+        h_video_ids = dup_data(h_video_ids=h_video_ids, initial_key_name=other_h_24h_key_name,
+                               dup_key_name=other_h_24h_dup_key_name, region=region)
 
     # ##### 去重小程序模型更新结果,并另存为redis中
     model_key_name = get_rov_redis_key(now_date=now_date)
@@ -458,6 +458,21 @@ def merge_df(df_left, df_right):
     return df_merged[feature_list]
 
 
+def merge_df_with_score(df_left, df_right):
+    """
+    df 按照[videoid, code]合并,平台回流人数、回流人数、分数 分别求和
+    :param df_left:
+    :param df_right:
+    :return:
+    """
+    df_merged = pd.merge(df_left, df_right, on=['videoid', 'code'], how='outer', suffixes=['_x', '_y'])
+    df_merged.fillna(0, inplace=True)
+    feature_list = ['videoid', 'code', 'lastonehour_return', 'platform_return', 'score']
+    for feature in feature_list[2:]:
+        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
+    return df_merged[feature_list]
+
+
 def process_with_region(region, df_merged, data_key, rule_key, rule_param, now_date, now_h, rule_rank_h_flag):
     log_.info(f"region = {region} start...")
     # 计算score
@@ -469,6 +484,14 @@ def process_with_region(region, df_merged, data_key, rule_key, rule_param, now_d
     log_.info(f"region = {region} end!")
 
 
+def process_with_region2(region, df_merged, data_key, rule_key, rule_param, now_date, now_h, rule_rank_h_flag):
+    log_.info(f"region = {region} start...")
+    region_score_df = df_merged[df_merged['code'] == region]
+    log_.info(f'region = {region}, region_score_df count = {len(region_score_df)}')
+    video_rank(df=region_score_df, now_date=now_date, now_h=now_h, region=region,
+               rule_key=rule_key, param=rule_param, data_key=data_key, rule_rank_h_flag=rule_rank_h_flag)
+    log_.info(f"region = {region} end!")
+
 
 def process_with_app_type(app_type, params, region_code_list, feature_df, now_date, now_h, rule_rank_h_flag):
     log_.info(f"app_type = {app_type} start...")
@@ -520,19 +543,38 @@ def process_with_param(param, data_params_item, rule_params_item, region_code_li
     data_key = param.get('data')
     data_param = data_params_item.get(data_key)
     log_.info(f"data_key = {data_key}, data_param = {data_param}")
-    df_list = [feature_df[feature_df['apptype'] == apptype] for apptype in data_param]
-    df_merged = reduce(merge_df, df_list)
-
     rule_key = param.get('rule')
     rule_param = rule_params_item.get(rule_key)
     log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
-    task_list = [
-        gevent.spawn(process_with_region,
-                     region, df_merged, data_key, rule_key, rule_param, now_date, now_h, rule_rank_h_flag)
-        for region in region_code_list
-    ]
-    gevent.joinall(task_list)
+    merge_func = rule_param.get('merge_func', None)
+
+    if merge_func == 2:
+        score_df_list = []
+        for apptype, weight in data_param.items():
+            df = feature_df[feature_df['apptype'] == apptype]
+            # 计算score
+            score_df = cal_score(df=df, param=rule_param)
+            score_df['score'] = score_df['score'] * weight
+            score_df_list.append(score_df)
+        # 分数合并
+        df_merged = reduce(merge_df_with_score, score_df_list)
+        # 更新平台回流比
+        df_merged['platform_return_rate'] = df_merged['platform_return'] / df_merged['lastonehour_return']
+        task_list = [
+            gevent.spawn(process_with_region2,
+                         region, df_merged, data_key, rule_key, rule_param, now_date, now_h, rule_rank_h_flag)
+            for region in region_code_list
+        ]
+    else:
+        df_list = [feature_df[feature_df['apptype'] == apptype] for apptype in data_param]
+        df_merged = reduce(merge_df, df_list)
+        task_list = [
+            gevent.spawn(process_with_region,
+                         region, df_merged, data_key, rule_key, rule_param, now_date, now_h, rule_rank_h_flag)
+            for region in region_code_list
+        ]
 
+    gevent.joinall(task_list)
     log_.info(f"param = {param} end!")
 
 

+ 49 - 9
region_rule_rank_h_by24h.py

@@ -197,6 +197,21 @@ def merge_df(df_left, df_right):
     return df_merged[feature_list]
 
 
+def merge_df_with_score(df_left, df_right):
+    """
+    df 按照[videoid, code]合并,平台回流人数、回流人数、分数 分别求和
+    :param df_left:
+    :param df_right:
+    :return:
+    """
+    df_merged = pd.merge(df_left, df_right, on=['videoid', 'code'], how='outer', suffixes=['_x', '_y'])
+    df_merged.fillna(0, inplace=True)
+    feature_list = ['videoid', 'code', 'lastday_return', 'platform_return', 'score']
+    for feature in feature_list[2:]:
+        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
+    return df_merged[feature_list]
+
+
 def process_with_region(region, df_merged, data_key, rule_key, rule_param, now_date, now_h):
     log_.info(f"region = {region} start...")
     # 计算score
@@ -208,6 +223,15 @@ def process_with_region(region, df_merged, data_key, rule_key, rule_param, now_d
     log_.info(f"region = {region} end!")
 
 
+def process_with_region2(region, df_merged, data_key, rule_key, rule_param, now_date, now_h):
+    log_.info(f"region = {region} start...")
+    region_score_df = df_merged[df_merged['code'] == region]
+    log_.info(f'region = {region}, region_score_df count = {len(region_score_df)}')
+    video_rank(df=region_score_df, now_date=now_date, now_h=now_h, region=region,
+               rule_key=rule_key, param=rule_param, data_key=data_key)
+    log_.info(f"region = {region} end!")
+
+
 def process_with_app_type(app_type, params, region_code_list, feature_df, now_date, now_h):
     log_.info(f"app_type = {app_type} start...")
     data_params_item = params.get('data_params')
@@ -233,22 +257,38 @@ def process_with_app_type(app_type, params, region_code_list, feature_df, now_da
 
 def process_with_param(param, data_params_item, rule_params_item, region_code_list, feature_df, now_date, now_h):
     log_.info(f"param = {param} start...")
-
     data_key = param.get('data')
     data_param = data_params_item.get(data_key)
     log_.info(f"data_key = {data_key}, data_param = {data_param}")
-    df_list = [feature_df[feature_df['apptype'] == apptype] for apptype in data_param]
-    df_merged = reduce(merge_df, df_list)
-
     rule_key = param.get('rule')
     rule_param = rule_params_item.get(rule_key)
     log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
-    task_list = [
-        gevent.spawn(process_with_region, region, df_merged, data_key, rule_key, rule_param, now_date, now_h)
-        for region in region_code_list
-    ]
-    gevent.joinall(task_list)
+    merge_func = rule_param.get('merge_func', None)
+    if merge_func == 2:
+        score_df_list = []
+        for apptype, weight in data_param.items():
+            df = feature_df[feature_df['apptype'] == apptype]
+            # 计算score
+            score_df = cal_score(df=df, param=rule_param)
+            score_df['score'] = score_df['score'] * weight
+            score_df_list.append(score_df)
+        # 分数合并
+        df_merged = reduce(merge_df_with_score, score_df_list)
+        # 更新平台回流比
+        df_merged['platform_return_rate'] = df_merged['platform_return'] / df_merged['lastday_return']
+        task_list = [
+            gevent.spawn(process_with_region2, region, df_merged, data_key, rule_key, rule_param, now_date, now_h)
+            for region in region_code_list
+        ]
+    else:
+        df_list = [feature_df[feature_df['apptype'] == apptype] for apptype, _ in data_param.items()]
+        df_merged = reduce(merge_df, df_list)
+        task_list = [
+            gevent.spawn(process_with_region, region, df_merged, data_key, rule_key, rule_param, now_date, now_h)
+            for region in region_code_list
+        ]
 
+    gevent.joinall(task_list)
     log_.info(f"param = {param} end!")
 
 

+ 73 - 42
rule_rank_h_by_24h.py

@@ -178,29 +178,29 @@ def video_rank_h(df, now_date, now_h, rule_key, param, data_key):
         # 清空线上过滤应用列表
         # redis_helper.del_keys(key_name=f"{config_.H_VIDEO_FILER_24H}{app_type}.{data_key}.{rule_key}")
 
-    if rule_key == 'rule3':
-        # 去重筛选结果,保留剩余数据并写入Redis
-        all_videos = df['videoid'].to_list()
-        log_.info(f'h_by24h_recall all videos count = {len(all_videos)}')
-        # 视频状态过滤
-        all_filtered_videos = filter_video_status(all_videos)
-        log_.info(f'all_filtered_videos count = {len(all_filtered_videos)}')
-        # 与筛选结果去重
-        other_videos = [video for video in all_filtered_videos if video not in day_video_ids]
-        log_.info(f'other_videos count = {len(other_videos)}')
-        # 写入对应的redis
-        other_24h_recall_result = {}
-        for video_id in other_videos:
-            score = df[df['videoid'] == video_id]['score']
-            other_24h_recall_result[int(video_id)] = float(score)
-        # other_h_24h_recall_key_name = \
-        #     f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{app_type}:{data_key}:{rule_key}:{now_dt}:{now_h}"
-        other_h_24h_recall_key_name = \
-            f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{data_key}:{rule_key}:{now_dt}:{now_h}"
-        if len(other_24h_recall_result) > 0:
-            log_.info(f"count = {len(other_24h_recall_result)}")
-            redis_helper.add_data_with_zset(key_name=other_h_24h_recall_key_name, data=other_24h_recall_result,
-                                            expire_time=2 * 3600)
+    # if rule_key in ['rule3', 'rule4']:
+    # 去重筛选结果,保留剩余数据并写入Redis
+    all_videos = df['videoid'].to_list()
+    log_.info(f'h_by24h_recall all videos count = {len(all_videos)}')
+    # 视频状态过滤
+    all_filtered_videos = filter_video_status(all_videos)
+    log_.info(f'all_filtered_videos count = {len(all_filtered_videos)}')
+    # 与筛选结果去重
+    other_videos = [video for video in all_filtered_videos if video not in day_video_ids]
+    log_.info(f'other_videos count = {len(other_videos)}')
+    # 写入对应的redis
+    other_24h_recall_result = {}
+    for video_id in other_videos:
+        score = df[df['videoid'] == video_id]['score']
+        other_24h_recall_result[int(video_id)] = float(score)
+    # other_h_24h_recall_key_name = \
+    #     f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{app_type}:{data_key}:{rule_key}:{now_dt}:{now_h}"
+    other_h_24h_recall_key_name = \
+        f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H_OTHER}{data_key}:{rule_key}:{now_dt}:{now_h}"
+    if len(other_24h_recall_result) > 0:
+        log_.info(f"count = {len(other_24h_recall_result)}")
+        redis_helper.add_data_with_zset(key_name=other_h_24h_recall_key_name, data=other_24h_recall_result,
+                                        expire_time=2 * 3600)
 
     # 去重更新rov模型结果,并另存为redis中
     # initial_data_dup = {}
@@ -232,6 +232,21 @@ def merge_df(df_left, df_right):
     return df_merged[feature_list]
 
 
+def merge_df_with_score(df_left, df_right):
+    """
+    df 按照videoid合并,平台回流人数、回流人数、分数 分别求和
+    :param df_left:
+    :param df_right:
+    :return:
+    """
+    df_merged = pd.merge(df_left, df_right, on=['videoid'], how='outer', suffixes=['_x', '_y'])
+    df_merged.fillna(0, inplace=True)
+    feature_list = ['videoid', '回流人数', 'platform_return', 'score']
+    for feature in feature_list[1:]:
+        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
+    return df_merged[feature_list]
+
+
 def rank_by_h(now_date, now_h, rule_params, project, table):
     # 获取特征数据
     feature_df = get_feature_data(now_date=now_date, now_h=now_h, project=project, table=table)
@@ -239,6 +254,7 @@ def rank_by_h(now_date, now_h, rule_params, project, table):
     # rank
     data_params_item = rule_params.get('data_params')
     rule_params_item = rule_params.get('rule_params')
+    """
     for param in rule_params.get('params_list'):
         data_key = param.get('data')
         data_param = data_params_item.get(data_key)
@@ -257,31 +273,46 @@ def rank_by_h(now_date, now_h, rule_params, project, table):
             score_df = cal_score1(df=df_merged)
         video_rank_h(df=score_df, now_date=now_date, now_h=now_h,
                      rule_key=rule_key, param=rule_param, data_key=data_key)
-
     """
-    for app_type, params in rule_params.items():
-        log_.info(f"app_type = {app_type}")
-        data_params_item = params.get('data_params')
-        rule_params_item = params.get('rule_params')
-        for param in params.get('params_list'):
-            data_key = param.get('data')
-            data_param = data_params_item.get(data_key)
-            log_.info(f"data_key = {data_key}, data_param = {data_param}")
-            df_list = [feature_df[feature_df['apptype'] == apptype] for apptype in data_param]
-            df_merged = reduce(merge_df, df_list)
 
-            rule_key = param.get('rule')
-            rule_param = rule_params_item.get(rule_key)
-            log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
-            # 计算score
-            cal_score_func = rule_param.get('cal_score_func', 1)
+    for param in rule_params.get('params_list'):
+        score_df_list = []
+        data_key = param.get('data')
+        data_param = data_params_item.get(data_key)
+        log_.info(f"data_key = {data_key}, data_param = {data_param}")
+        rule_key = param.get('rule')
+        rule_param = rule_params_item.get(rule_key)
+        log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
+        cal_score_func = rule_param.get('cal_score_func', 1)
+        merge_func = rule_param.get('merge_func', 1)
+
+        if merge_func == 2:
+            for apptype, weight in data_param.items():
+                df = feature_df[feature_df['apptype'] == apptype]
+                # 计算score
+                if cal_score_func == 2:
+                    score_df = cal_score2(df=df, param=rule_param)
+                else:
+                    score_df = cal_score1(df=df)
+                score_df['score'] = score_df['score'] * weight
+                score_df_list.append(score_df)
+            # 分数合并
+            df_merged = reduce(merge_df_with_score, score_df_list)
+            # 更新平台回流比
+            df_merged['platform_return_rate'] = df_merged['platform_return'] / df_merged['回流人数']
+            video_rank_h(df=df_merged, now_date=now_date, now_h=now_h,
+                         rule_key=rule_key, param=rule_param, data_key=data_key)
+
+        else:
+            df_list = [feature_df[feature_df['apptype'] == apptype] for apptype, _ in data_param.items()]
+            df_merged = reduce(merge_df, df_list)
             if cal_score_func == 2:
                 score_df = cal_score2(df=df_merged, param=rule_param)
             else:
                 score_df = cal_score1(df=df_merged)
-            video_rank_h(df=score_df, now_date=now_date, now_h=now_h, rule_key=rule_key, param=rule_param,
-                         app_type=app_type, data_key=data_key)
-    """
+            video_rank_h(df=score_df, now_date=now_date, now_h=now_h,
+                         rule_key=rule_key, param=rule_param, data_key=data_key)
+
     #     # to-csv
     #     score_filename = f"score_by24h_{key}_{datetime.strftime(now_date, '%Y%m%d%H')}.csv"
     #     score_df.to_csv(f'./data/{score_filename}')