# -*- coding: utf-8 -*-
import multiprocessing
import traceback
import gevent
import datetime
import pandas as pd
import math
from functools import reduce
from odps import ODPS
from threading import Timer
from my_utils import RedisHelper, get_data_from_odps, filter_video_status, check_table_partition_exits, \
    filter_video_status_app, send_msg_to_feishu
from my_config import set_config
from log import Log

# os.environ['NUMEXPR_MAX_THREADS'] = '16'

config_, _ = set_config()
log_ = Log()

region_code = config_.REGION_CODE

RULE_PARAMS = {
    'rule_params': {
        'rule66': {'view_type': 'video-show', 'return_count': 21, 'score_rule': 0, 'platform_return_rate': 0.001},
    },
    'data_params': config_.DATA_PARAMS,
    'params_list': [
        {'data': 'data66', 'rule': 'rule66'},
    ]
}
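
# NOTE: config_.DATA_PARAMS is assumed to map each data key to a dict of
# {apptype: weight}, e.g. {'data66': {0: 0.5, 4: 0.5}} -- process_with_param
# below iterates data_param.items() as (apptype, weight) pairs. The concrete
# values live in my_config; the example values here are illustrative only.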

features = [
    'apptype',
    'code',  # province code
    'videoid',
    'lastday_preview',  # users with pre-exposure yesterday
    'lastday_view',  # users exposed yesterday
    'lastday_play',  # users who played yesterday
    'lastday_share',  # users who shared yesterday
    'lastday_return',  # users who returned yesterday
    'lastday_preview_total',  # pre-exposure count yesterday
    'lastday_view_total',  # exposure count yesterday
    'lastday_play_total',  # play count yesterday
    'lastday_share_total',  # share count yesterday
    'platform_return',  # platform-side return count (numerator of platform_return_rate)
    'platform_preview',
    'platform_preview_total',
    'platform_show',  # platform-side show count (CTR denominator for 'video-show')
    'platform_show_total',
    'platform_view',
    'platform_view_total',
]


def get_rov_redis_key(now_date):
    """Return the Redis key holding the ROV model results, falling back to the previous day."""
    redis_helper = RedisHelper()
    now_dt = datetime.datetime.strftime(now_date, '%Y%m%d')
    key_name = f'{config_.RECALL_KEY_NAME_PREFIX}{now_dt}'
    if not redis_helper.key_exists(key_name=key_name):
        # Today's key is not there yet -- fall back to yesterday's results.
        pre_dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
        key_name = f'{config_.RECALL_KEY_NAME_PREFIX}{pre_dt}'
    return key_name


def data_check(project, table, now_date):
    """Check whether the upstream partition is ready; return its row count (0 if not)."""
    odps = ODPS(
        access_id=config_.ODPS_CONFIG['ACCESSID'],
        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
        project=project,
        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
        connect_timeout=3000,
        read_timeout=500000,
        pool_maxsize=1000,
        pool_connections=1000
    )
    try:
        dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
        check_res = check_table_partition_exits(date=dt, project=project, table=table)
        if check_res:
            sql = f"select * from {project}.{table} where dt = '{dt}'"
            with odps.execute_sql(sql=sql).open_reader() as reader:
                data_count = reader.count
        else:
            data_count = 0
    except Exception as e:
        log_.error(f"data_check failed: {e}")
        data_count = 0
    return data_count
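
# Usage sketch (placeholder names, not from this file):
#   count = data_check(project='my_project', table='my_table', now_date=datetime.datetime.now())
# returns the hour partition's row count, or 0 when the partition is missing or
# the query fails; the real project/table come from config_ in h_timer_check.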


def get_feature_data(project, table, now_date):
    """Fetch the feature records for the given hour partition and return them as a DataFrame."""
    dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
    # dt = '2022041310'
    records = get_data_from_odps(date=dt, project=project, table=table)
    feature_data = []
    for record in records:
        item = {feature_name: record[feature_name] for feature_name in features}
        feature_data.append(item)
    feature_df = pd.DataFrame(feature_data)
    return feature_df


def cal_score(df, param):
    """
    Compute the ranking score.
    :param df: feature DataFrame
    :param param: rule parameters
    :return: DataFrame sorted by score, descending
    """
    # Score formula: sharerate * backrate * logback * ctr
    #   sharerate = lastday_share / (lastday_play + 1000)
    #   backrate  = lastday_return / (lastday_share + 10)
    #   ctr       = lastday_play / (lastday_preview + 1000), capped: K2 = 0.6 if ctr > 0.6 else ctr
    #   score     = sharerate * backrate * LOG(lastday_return + 1) * K2
    df = df.fillna(0)
    df['share_rate'] = df['lastday_share'] / (df['lastday_play'] + 1000)
    df['back_rate'] = df['lastday_return'] / (df['lastday_share'] + 10)
    df['log_back'] = (df['lastday_return'] + 1).apply(math.log)
    if param.get('view_type', None) == 'video-show':
        # For 'video-show' data, use platform shows as the CTR denominator.
        df['ctr'] = df['lastday_play'] / (df['platform_show'] + 1000)
    else:
        df['ctr'] = df['lastday_play'] / (df['lastday_preview'] + 1000)
    df['K2'] = df['ctr'].apply(lambda x: 0.6 if x > 0.6 else x)
    df['platform_return_rate'] = df['platform_return'] / df['lastday_return']
    df['score1'] = df['share_rate'] * df['back_rate'] * df['log_back'] * df['K2']
    click_score_rate = param.get('click_score_rate', None)
    back_score_rate = param.get('back_score_rate', None)
    if click_score_rate is not None:
        df['score'] = (1 - click_score_rate) * df['score1'] + click_score_rate * df['K2']
    elif back_score_rate is not None:
        df['score'] = (1 - back_score_rate) * df['score1'] + back_score_rate * df['back_rate']
    else:
        df['score'] = df['score1']
    df = df.sort_values(by=['score'], ascending=False)
    return df
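
# Worked example for the score formula above (illustrative numbers only):
#   lastday_share=20, lastday_play=1000, lastday_return=30, platform_show=2000
#   share_rate = 20 / (1000 + 1000) = 0.01
#   back_rate  = 30 / (20 + 10)     = 1.0
#   log_back   = ln(30 + 1)         ~= 3.434
#   ctr ('video-show') = 1000 / (2000 + 1000) ~= 0.333 -> K2 = 0.333 (cap 0.6 not hit)
#   score1 = 0.01 * 1.0 * 3.434 * 0.333 ~= 0.0114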


def video_rank(df, now_date, now_h, rule_key, param, region, data_key):
    """
    Select the videos that qualify for the recall pool and write them to Redis.
    :param df: scored feature DataFrame
    :param now_date: current date
    :param now_h: current hour
    :param rule_key: admission rule name for the hourly data
    :param param: admission rule parameters
    :param region: region this batch belongs to
    :param data_key: data source key
    :return: None
    """
    redis_helper = RedisHelper()
    # Admission thresholds for the recall pool
    return_count = param.get('return_count', 1)
    score_value = param.get('score_rule', 0)
    platform_return_rate = param.get('platform_return_rate', 0)
    h_recall_df = df[(df['lastday_return'] >= return_count) & (df['score'] >= score_value)
                     & (df['platform_return_rate'] >= platform_return_rate)]

    # On duplicate videoid, keep the row with the higher score.
    h_recall_df = h_recall_df.sort_values(by=['score'], ascending=False)
    h_recall_df = h_recall_df.drop_duplicates(subset=['videoid'], keep='first')
    h_recall_df['videoid'] = h_recall_df['videoid'].astype(int)

    h_recall_videos = h_recall_df['videoid'].to_list()
    log_.info(f"video count after rule filtering = {len(h_recall_videos)}")

    # Filter by video status
    if data_key in ['data7', ]:
        filtered_videos = filter_video_status_app(h_recall_videos)
    else:
        filtered_videos = filter_video_status(h_recall_videos)
    log_.info(f"video count after status filtering = {len(filtered_videos)}")

    # Write the result to the corresponding Redis key
    h_video_ids = []
    day_recall_result = {}
    for video_id in filtered_videos:
        score = h_recall_df[h_recall_df['videoid'] == video_id]['score'].iloc[0]
        day_recall_result[int(video_id)] = float(score)
        h_video_ids.append(int(video_id))
    day_recall_key_name = \
        f"{config_.RECALL_KEY_NAME_PREFIX_REGION_BY_24H}{region}:{data_key}:{rule_key}:" \
        f"{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
    log_.info("region-24h data for region = {}, redis key = {}".format(region, day_recall_key_name))
    if len(day_recall_result) > 0:
        log_.info(f"writing head data: count = {len(day_recall_result)}, key = {day_recall_key_name}")
        redis_helper.add_data_with_zset(key_name=day_recall_key_name, data=day_recall_result, expire_time=2 * 3600)
    else:
        log_.info("no data, skipping write.")

    # Clear the online filter list for this application
    # redis_helper.del_keys(key_name=f"{config_.REGION_H_VIDEO_FILER_24H}{region}.{app_type}.{data_key}.{rule_key}")

    # Dedupe against other recall pools and store to the corresponding Redis keys
    # dup_to_redis(h_video_ids=h_video_ids, now_date=now_date, now_h=now_h, rule_key=rule_key, region=region)


def merge_df(df_left, df_right):
    """
    Merge two DataFrames on [videoid, code], summing the corresponding features.
    :param df_left:
    :param df_right:
    :return: merged DataFrame
    """
    df_merged = pd.merge(df_left, df_right, on=['videoid', 'code'], how='outer', suffixes=['_x', '_y'])
    df_merged.fillna(0, inplace=True)
    feature_list = ['videoid', 'code']
    for feature in features:
        if feature in ['apptype', 'videoid', 'code']:
            continue
        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
        feature_list.append(feature)
    return df_merged[feature_list]
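
# Usage sketch (illustrative): per-apptype frames are combined into one frame,
# as process_with_param does via functools.reduce:
#   df_merged = reduce(merge_df, [df_apptype_a, df_apptype_b, df_apptype_c])
# Rows sharing (videoid, code) are outer-joined and their count features summed.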


def merge_df_with_score(df_left, df_right):
    """
    Merge two DataFrames on [videoid, code], summing platform returns, returns, and scores.
    :param df_left:
    :param df_right:
    :return: merged DataFrame
    """
    df_merged = pd.merge(df_left, df_right, on=['videoid', 'code'], how='outer', suffixes=['_x', '_y'])
    df_merged.fillna(0, inplace=True)
    feature_list = ['videoid', 'code', 'lastday_return', 'platform_return', 'score']
    for feature in feature_list[2:]:
        df_merged[feature] = df_merged[f'{feature}_x'] + df_merged[f'{feature}_y']
    return df_merged[feature_list]


def process_with_region(region, df_merged, data_key, rule_key, rule_param, now_date, now_h):
    log_.info(f"gevent task for region = {region} start")
    region_df = df_merged[df_merged['code'] == region]
    log_.info(f'region = {region}, data count = {len(region_df)}')
    score_df = cal_score(df=region_df, param=rule_param)
    video_rank(df=score_df, now_date=now_date, now_h=now_h, region=region,
               rule_key=rule_key, param=rule_param, data_key=data_key)
    log_.info(f"gevent task for region = {region} end")


def process_with_region2(region, df_merged, data_key, rule_key, rule_param, now_date, now_h):
    log_.info(f"region = {region} start...")
    region_score_df = df_merged[df_merged['code'] == region]
    log_.info(f'region = {region}, region_score_df count = {len(region_score_df)}')
    video_rank(df=region_score_df, now_date=now_date, now_h=now_h, region=region,
               rule_key=rule_key, param=rule_param, data_key=data_key)
    log_.info(f"region = {region} end!")


def process_with_app_type(app_type, params, region_code_list, feature_df, now_date, now_h):
    log_.info(f"app_type = {app_type} start...")
    data_params_item = params.get('data_params')
    rule_params_item = params.get('rule_params')
    for param in params.get('params_list'):
        data_key = param.get('data')
        data_param = data_params_item.get(data_key)
        log_.info(f"data_key = {data_key}, data_param = {data_param}")
        df_list = [feature_df[feature_df['apptype'] == apptype] for apptype in data_param]
        df_merged = reduce(merge_df, df_list)
        rule_key = param.get('rule')
        rule_param = rule_params_item.get(rule_key)
        log_.info(f"rule_key = {rule_key}, rule_param = {rule_param}")
        task_list = [
            gevent.spawn(process_with_region, region, df_merged, data_key, rule_key, rule_param,
                         now_date, now_h)
            for region in region_code_list
        ]
        gevent.joinall(task_list)
    log_.info(f"app_type = {app_type} end!")


def process_with_param(param, data_params_item, rule_params_item, region_code_list, feature_df, now_date, now_h):
    data_key = param.get('data')
    data_param = data_params_item.get(data_key)
    rule_key = param.get('rule')
    rule_param = rule_params_item.get(rule_key)
    merge_func = rule_param.get('merge_func', None)
    log_.info("data: {}, rule: {}.".format(data_key, rule_key))
    log_.info("rule detail: {}.".format(rule_param))
    if merge_func == 2:
        # Score each apptype separately, weight the scores, then merge them.
        score_df_list = []
        for apptype, weight in data_param.items():
            df = feature_df[feature_df['apptype'] == apptype]
            # Compute score
            score_df = cal_score(df=df, param=rule_param)
            score_df['score'] = score_df['score'] * weight
            score_df_list.append(score_df)
        # Merge the weighted scores
        df_merged = reduce(merge_df_with_score, score_df_list)
        # Recompute the platform return rate on the merged counts
        df_merged['platform_return_rate'] = df_merged['platform_return'] / df_merged['lastday_return']
        task_list = [
            gevent.spawn(process_with_region2, region, df_merged, data_key, rule_key, rule_param, now_date, now_h)
            for region in region_code_list
        ]
    else:
        # Merge the raw features across apptypes first, then score per region.
        df_list = [feature_df[feature_df['apptype'] == apptype] for apptype, _ in data_param.items()]
        df_merged = reduce(merge_df, df_list)
        task_list = [
            gevent.spawn(process_with_region, region, df_merged, data_key, rule_key, rule_param, now_date, now_h)
            for region in region_code_list
        ]
    gevent.joinall(task_list)
    log_.info(f"worker process for param = {param} done!")


def rank_by_24h(project, table, now_date, now_h, rule_params, region_code_list):
    # Fetch feature data
    feature_df = get_feature_data(project=project, table=table, now_date=now_date)
    feature_df['apptype'] = feature_df['apptype'].astype(int)
    # Rank: one worker process per params_list entry
    data_params_item = rule_params.get('data_params')
    rule_params_item = rule_params.get('rule_params')
    params_list = rule_params.get('params_list')
    pool = multiprocessing.Pool(processes=len(params_list))
    for param in params_list:
        pool.apply_async(
            func=process_with_param,
            args=(param, data_params_item, rule_params_item, region_code_list, feature_df, now_date, now_h)
        )
    pool.close()
    pool.join()

    """
    pool = multiprocessing.Pool(processes=len(config_.APP_TYPE))
    for app_type, params in rule_params.items():
        pool.apply_async(func=process_with_app_type,
                         args=(app_type, params, region_code_list, feature_df, now_date, now_h))
    pool.close()
    pool.join()
    """


def dup_to_redis(h_video_ids, now_date, now_h, rule_key, region):
    """Dedupe the region-grouped hourly data against other recall pools and store to Redis."""
    redis_helper = RedisHelper()

    # ##### Dedupe against the mini-program day-level results and store separately in Redis
    day_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_BY_DAY}rule2.{datetime.datetime.strftime(now_date, '%Y%m%d')}"
    if redis_helper.key_exists(key_name=day_key_name):
        day_data = redis_helper.get_all_data_from_zset(key_name=day_key_name, with_scores=True)
        log_.info(f'day data count = {len(day_data)}')
        day_dup = {}
        for video_id, score in day_data:
            if int(video_id) not in h_video_ids:
                day_dup[int(video_id)] = score
                h_video_ids.append(int(video_id))
        log_.info(f"day data dup count = {len(day_dup)}")
        day_dup_key_name = \
            f"{config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_DAY_24H}{region}.{rule_key}." \
            f"{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
        if len(day_dup) > 0:
            redis_helper.add_data_with_zset(key_name=day_dup_key_name, data=day_dup, expire_time=23 * 3600)

    # ##### Dedupe against the mini-program model results and store separately in Redis
    model_key_name = get_rov_redis_key(now_date=now_date)
    model_data = redis_helper.get_all_data_from_zset(key_name=model_key_name, with_scores=True)
    log_.info(f'model data count = {len(model_data)}')
    model_data_dup = {}
    for video_id, score in model_data:
        if int(video_id) not in h_video_ids:
            model_data_dup[int(video_id)] = score
            h_video_ids.append(int(video_id))
    log_.info(f"model data dup count = {len(model_data_dup)}")
    model_data_dup_key_name = \
        f"{config_.RECALL_KEY_NAME_PREFIX_DUP_REGION_24H}{region}.{rule_key}." \
        f"{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
    if len(model_data_dup) > 0:
        redis_helper.add_data_with_zset(key_name=model_data_dup_key_name, data=model_data_dup, expire_time=23 * 3600)


def h_rank_bottom(now_date, now_h, rule_params, region_code_list):
    """Data was not updated on time: reuse the previous hour's results for the current hour."""
    redis_helper = RedisHelper()
    if now_h == 0:
        # At hour 0, fall back to hour 23 of the previous day.
        redis_dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
        redis_h = 23
    else:
        redis_dt = datetime.datetime.strftime(now_date, '%Y%m%d')
        redis_h = now_h - 1
    # Use the previous hour's region-grouped data as this hour's data
    key_prefix = config_.RECALL_KEY_NAME_PREFIX_REGION_BY_24H
    for param in rule_params.get('params_list'):
        data_key = param.get('data')
        rule_key = param.get('rule')
        log_.info(f"data_key = {data_key}, rule_key = {rule_key}")
        for region in region_code_list:
            log_.info(f"region = {region}")
            key_name = f"{key_prefix}{region}:{data_key}:{rule_key}:{redis_dt}:{redis_h}"
            initial_data = redis_helper.get_all_data_from_zset(key_name=key_name, with_scores=True)
            if initial_data is None:
                initial_data = []
            final_data = dict()
            h_video_ids = []
            for video_id, score in initial_data:
                final_data[video_id] = score
                h_video_ids.append(int(video_id))
            # Store to the corresponding Redis key for the current hour
            final_key_name = \
                f"{key_prefix}{region}:{data_key}:{rule_key}:{datetime.datetime.strftime(now_date, '%Y%m%d')}:{now_h}"
            if len(final_data) > 0:
                redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=2 * 3600)
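
# Fallback key naming sketch (illustrative region code 110000, not from this file):
# at 2022-04-14 hour 0, data66/rule66 copies
#   {prefix}110000:data66:rule66:20220413:23  ->  {prefix}110000:data66:rule66:20220414:0
# where prefix = config_.RECALL_KEY_NAME_PREFIX_REGION_BY_24H.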


def h_timer_check():
    try:
        rule_params = RULE_PARAMS
        project = config_.PROJECT_REGION_24H_APP_TYPE
        table = config_.TABLE_REGION_24H_APP_TYPE
        region_code_list = [code for region, code in region_code.items() if code != '-1']
        now_date = datetime.datetime.today()
        now_h = datetime.datetime.now().hour
        now_min = datetime.datetime.now().minute
        log_.info(f"start: {datetime.datetime.strftime(now_date, '%Y%m%d%H')}")
        # Check whether today's upstream data is ready
        h_data_count = data_check(project=project, table=table, now_date=now_date)
        if h_data_count > 0:
            log_.info('upstream table row count h_data_count = {}, starting computation.'.format(h_data_count))
            rank_by_24h(now_date=now_date, now_h=now_h, rule_params=rule_params,
                        project=project, table=table, region_code_list=region_code_list)
            log_.info("data step 3 ---------- completed normally ----------")
        elif now_min > 40:
            log_.info('past minute 40, the run would not finish in time; using bottom data!')
            h_rank_bottom(now_date=now_date, now_h=now_h, rule_params=rule_params, region_code_list=region_code_list)
            log_.info('---------- past minute 40, bottom data used, done ----------')
        else:
            # Data not ready yet; re-check in 1 minute
            log_.info("upstream data not ready, waiting...")
            Timer(60, h_timer_check).start()
    except Exception as e:
        log_.error(f"region-grouped 24h data update failed, exception: {e}, traceback: {traceback.format_exc()}")
        send_msg_to_feishu(
            webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
            key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
            msg_text=f"rov-offline{config_.ENV_TEXT} - region-grouped 24h data update failed\n"
                     f"exception: {e}\n"
                     f"traceback: {traceback.format_exc()}"
        )


if __name__ == '__main__':
    log_.info("alg_recsys_recall_24h_region.py: '24h region' job starting")
    h_timer_check()