rule_rank_h_by_24h.py

import pandas as pd
import math
from odps import ODPS
from threading import Timer
from datetime import datetime, timedelta
from get_data import get_data_from_odps
from db_helper import RedisHelper
from utils import filter_video_status
from config import set_config
from log import Log

config_, _ = set_config()
log_ = Log()

features = [
    'videoid',
    'preview人数',  # users pre-exposed (preview) in the past 24h
    'view人数',  # users exposed (view) in the past 24h
    'play人数',  # users who played in the past 24h
    'share人数',  # users who shared in the past 24h
    '回流人数',  # users who shared in the past 24h and returned in the past 24h
    'preview次数',  # preview (pre-exposure) count in the past 24h
    'view次数',  # view (exposure) count in the past 24h
    'play次数',  # play count in the past 24h
    'share次数',  # share count in the past 24h
    'platform_return',
    'platform_preview',
    'platform_preview_total',
    'platform_show',
    'platform_show_total',
    'platform_view',
    'platform_view_total',
]


def get_rov_redis_key(now_date):
    # Get the Redis key where the ROV model results are stored;
    # fall back to the previous day's key if today's has not been written yet
    redis_helper = RedisHelper()
    now_dt = datetime.strftime(now_date, '%Y%m%d')
    key_name = f'{config_.RECALL_KEY_NAME_PREFIX}{now_dt}'
    if not redis_helper.key_exists(key_name=key_name):
        pre_dt = datetime.strftime(now_date - timedelta(days=1), '%Y%m%d')
        key_name = f'{config_.RECALL_KEY_NAME_PREFIX}{pre_dt}'
    return key_name
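
# Illustrative note (dates are examples, not from the source): on 2022-04-26 the key
# looked up is f"{config_.RECALL_KEY_NAME_PREFIX}20220426"; if that key does not exist,
# the previous day's key (suffix 20220425) is returned instead. The concrete prefix
# value lives in config and is not shown in this file.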


def h_data_check(project, table, now_date, now_h):
    """Check whether the data for the target partition is ready."""
    odps = ODPS(
        access_id=config_.ODPS_CONFIG['ACCESSID'],
        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
        project=project,
        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
        connect_timeout=3000,
        read_timeout=500000,
        pool_maxsize=1000,
        pool_connections=1000
    )
    try:
        # From 23:00 until 08:00 (exclusive), always use the list generated at 22:00
        if now_h == 23:
            dt = datetime.strftime(now_date - timedelta(hours=1), '%Y%m%d%H')
        elif now_h < 8:
            dt = f"{datetime.strftime(now_date - timedelta(days=1), '%Y%m%d')}22"
        else:
            dt = datetime.strftime(now_date, '%Y%m%d%H')
        sql = f'select * from {project}.{table} where dt = {dt}'
        with odps.execute_sql(sql=sql).open_reader() as reader:
            data_count = reader.count
    except Exception:
        data_count = 0
    return data_count
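
# Sketch of the partition selection above (times are examples, not from the source):
#   run at 2022-04-26 03:00  ->  dt = '2022042522'  (previous day's 22:00 list)
#   run at 2022-04-26 10:00  ->  dt = '2022042610'  (current hour)
#   run at 2022-04-26 23:00  ->  dt = '2022042622'  (same day's 22:00 list)
# get_feature_data() below applies the same dt logic when it reads the feature table.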


def get_feature_data(now_date, now_h, project, table):
    """Fetch the feature data for the target partition as a DataFrame."""
    # From 23:00 until 08:00 (exclusive), always use the list generated at 22:00
    if now_h == 23:
        dt = datetime.strftime(now_date - timedelta(hours=1), '%Y%m%d%H')
    elif now_h < 8:
        dt = f"{datetime.strftime(now_date - timedelta(days=1), '%Y%m%d')}22"
    else:
        dt = datetime.strftime(now_date, '%Y%m%d%H')
    log_.info({'feature_dt': dt})
    # dt = '20220425'
    records = get_data_from_odps(date=dt, project=project, table=table)
    feature_data = []
    for record in records:
        item = {}
        for feature_name in features:
            item[feature_name] = record[feature_name]
        feature_data.append(item)
    feature_df = pd.DataFrame(feature_data)
    return feature_df


def cal_score1(df):
    # score1 formula: score = 回流人数 / (view人数 + 1000)
    df = df.fillna(0)
    df['score'] = df['回流人数'] / (df['view人数'] + 1000)
    df = df.sort_values(by=['score'], ascending=False)
    return df
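
# Worked example (made-up numbers): 回流人数 = 50, view人数 = 4000
#   score = 50 / (4000 + 1000) = 0.01
# The +1000 smoothing term dampens the score of videos with very little exposure.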


def cal_score2(df):
    # score2 formula: score = share次数 / (view人数 + 1000) + 0.01 * 回流人数 / (share次数 + 100)
    df = df.fillna(0)
    df['share_rate'] = df['share次数'] / (df['view人数'] + 1000)
    df['back_rate'] = df['回流人数'] / (df['share次数'] + 100)
    df['score'] = df['share_rate'] + 0.01 * df['back_rate']
    df['platform_return_rate'] = df['platform_return'] / df['回流人数']
    df = df.sort_values(by=['score'], ascending=False)
    return df
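
# Worked example (made-up numbers): share次数 = 30, view人数 = 4000, 回流人数 = 20
#   share_rate = 30 / (4000 + 1000) = 0.006
#   back_rate  = 20 / (30 + 100)    ≈ 0.1538
#   score      = 0.006 + 0.01 * 0.1538 ≈ 0.0075
# Note that platform_return_rate has no smoothing term, so it becomes NaN or inf
# for rows where 回流人数 is 0 after fillna(0).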


def video_rank_h(df, now_date, now_h, rule_key, param):
    """
    Select the videos that qualify for this recall source and merge the result with
    the daily-updated ROV model video list.
    :param df: scored feature DataFrame
    :param now_date: current date
    :param now_h: current hour
    :param rule_key: name of the day-level rule used as the entry condition
    :param param: parameters of the day-level rule entry condition
    :return:
    """
    # Get the ROV model results
    redis_helper = RedisHelper()
    key_name = get_rov_redis_key(now_date=now_date)
    initial_data = redis_helper.get_data_zset_with_index(key_name=key_name, start=0, end=-1, with_scores=True)
    log_.info(f'initial data count = {len(initial_data)}')
    # Select the videos that meet the recall-source entry conditions
    return_count = param.get('return_count')
    if return_count:
        day_recall_df = df[df['回流人数'] > return_count]
    else:
        day_recall_df = df
    platform_return_rate = param.get('platform_return_rate', 0)
    if 'platform_return_rate' in day_recall_df.columns:
        # cal_score1 does not add this column, so only filter when it is present
        day_recall_df = day_recall_df[day_recall_df['platform_return_rate'] > platform_return_rate]
    # When a videoid appears more than once, keep the row with the higher score
    day_recall_df = day_recall_df.sort_values(by=['score'], ascending=False)
    day_recall_df = day_recall_df.drop_duplicates(subset=['videoid'], keep='first')
    day_recall_df['videoid'] = day_recall_df['videoid'].astype(int)
    day_recall_videos = day_recall_df['videoid'].to_list()
    log_.info(f'h_by24h_recall videos count = {len(day_recall_videos)}')
    # Filter by video status
    filtered_videos = filter_video_status(day_recall_videos)
    log_.info(f'filtered_videos count = {len(filtered_videos)}')
    # Write the result to the corresponding Redis key
    now_dt = datetime.strftime(now_date, '%Y%m%d')
    day_video_ids = []
    day_recall_result = {}
    for video_id in filtered_videos:
        score = day_recall_df[day_recall_df['videoid'] == video_id]['score']
        day_recall_result[int(video_id)] = float(score)
        day_video_ids.append(int(video_id))
    day_recall_key_name = \
        f"{config_.RECALL_KEY_NAME_PREFIX_BY_24H}{rule_key}.{now_dt}.{now_h}"
    if len(day_recall_result) > 0:
        redis_helper.add_data_with_zset(key_name=day_recall_key_name, data=day_recall_result, expire_time=23 * 3600)
        # Clear the online filter list
        redis_helper.del_keys(key_name=f"{config_.H_VIDEO_FILER_24H}{rule_key}")
    # Deduplicate the ROV model results against the new recall list and store them under a separate Redis key
    initial_data_dup = {}
    for video_id, score in initial_data:
        if int(video_id) not in day_video_ids:
            initial_data_dup[int(video_id)] = score
    log_.info(f"initial data dup count = {len(initial_data_dup)}")
    initial_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_DUP_24H}{rule_key}.{now_dt}.{now_h}"
    if len(initial_data_dup) > 0:
        redis_helper.add_data_with_zset(key_name=initial_key_name, data=initial_data_dup, expire_time=23 * 3600)


def rank_by_h(now_date, now_h, rule_params, project, table):
    # Get the feature data
    feature_df = get_feature_data(now_date=now_date, now_h=now_h, project=project, table=table)
    # rank
    for key, value in rule_params.items():
        log_.info(f"rule = {key}, param = {value}")
        # Calculate the score
        cal_score_func = value.get('cal_score_func', 1)
        if cal_score_func == 2:
            score_df = cal_score2(df=feature_df)
        else:
            score_df = cal_score1(df=feature_df)
        video_rank_h(df=score_df, now_date=now_date, now_h=now_h, rule_key=key, param=value)
        # to-csv
        score_filename = f"score_by24h_{key}_{datetime.strftime(now_date, '%Y%m%d%H')}.csv"
        score_df.to_csv(f'./data/{score_filename}')
        # to-logs
        log_.info({"date": datetime.strftime(now_date, '%Y%m%d%H'),
                   "redis_key_prefix": config_.RECALL_KEY_NAME_PREFIX_BY_24H,
                   "rule_key": key,
                   "score_df": score_df[['videoid', 'score']]})


def h_rank_bottom(now_date, now_h, rule_key):
    """Fallback when the data is not updated on time: reuse the recall data from the previous hour as the current hour's data."""
    log_.info(f"rule_key = {rule_key}")
    # Read the previous hour's recall results from Redis
    redis_helper = RedisHelper()
    if now_h == 0:
        redis_dt = datetime.strftime(now_date - timedelta(days=1), '%Y%m%d')
        redis_h = 23
    else:
        redis_dt = datetime.strftime(now_date, '%Y%m%d')
        redis_h = now_h - 1
    key_prefix_list = [config_.RECALL_KEY_NAME_PREFIX_BY_24H, config_.RECALL_KEY_NAME_PREFIX_DUP_24H]
    for key_prefix in key_prefix_list:
        key_name = f"{key_prefix}{rule_key}.{redis_dt}.{redis_h}"
        initial_data = redis_helper.get_data_zset_with_index(key_name=key_name, start=0, end=-1, with_scores=True)
        final_data = dict()
        for video_id, score in initial_data:
            final_data[video_id] = score
        # Store into the corresponding Redis key
        final_key_name = \
            f"{key_prefix}{rule_key}.{datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
        if len(final_data) > 0:
            redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=23 * 3600)
    # Clear the online filter list
    redis_helper.del_keys(key_name=f"{config_.H_VIDEO_FILER_24H}{rule_key}")
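
# Illustrative note (hypothetical date and rule name): at now_h = 14 on 20220426 this
# copies, for both key prefixes,
#   {prefix}rule1.20220426.13  ->  {prefix}rule1.20220426.14
# so downstream readers always find a key for the current hour even when the
# upstream table is late.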


def h_timer_check():
    project = config_.PROJECT_24H
    table = config_.TABLE_24H
    rule_params = config_.RULE_PARAMS_24H
    now_date = datetime.today()
    log_.info(f"now_date: {datetime.strftime(now_date, '%Y%m%d%H')}")
    now_min = datetime.now().minute
    now_h = datetime.now().hour
    # Check whether the day-level data for the current update is ready
    h_data_count = h_data_check(project=project, table=table, now_date=now_date, now_h=now_h)
    if h_data_count > 0:
        log_.info(f'h_by24h_data_count = {h_data_count}')
        # Data is ready, run the update
        rank_by_h(now_date=now_date, now_h=now_h, rule_params=rule_params, project=project, table=table)
    elif now_min > 50:
        log_.info('h_by24h_recall data is None!')
        for key, _ in rule_params.items():
            h_rank_bottom(now_date=now_date, now_h=now_h, rule_key=key)
    else:
        # Data is not ready yet, check again in 1 minute
        Timer(60, h_timer_check).start()
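
# Scheduling note: the Timer above re-runs h_timer_check every 60 seconds until the
# ODPS partition appears; once the minute hand passes 50 without data, the run falls
# back to h_rank_bottom and copies forward the previous hour's keys for every rule.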


if __name__ == '__main__':
    h_timer_check()