rule_rank_h_18_19.py 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196
  1. # -*- coding: utf-8 -*-
  2. # @ModuleName: rule_rank_h_18_19
  3. # @Author: Liqian
  4. # @Time: 2022/4/21 下午4:31
  5. # @Software: PyCharm
  6. import datetime
  7. import pandas as pd
  8. import math
  9. from odps import ODPS
  10. from threading import Timer
  11. from get_data import get_data_from_odps
  12. from db_helper import RedisHelper
  13. from config import set_config
  14. from log import Log
# Project-level config and logger singletons used throughout this module.
config_, _ = set_config()
log_ = Log()

# Feature columns read from the hour-level ODPS table.
features = [
    'videoid',
    'lastonehour_view',    # exposures in the past hour
    'lastonehour_play',    # plays in the past hour
    'lastonehour_share',   # shares in the past hour
    'lastonehour_return',  # returns (share-link comebacks) in the past hour
]
  24. def h_data_check(project, table, now_date):
  25. """检查数据是否准备好"""
  26. odps = ODPS(
  27. access_id=config_.ODPS_CONFIG['ACCESSID'],
  28. secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
  29. project=project,
  30. endpoint=config_.ODPS_CONFIG['ENDPOINT'],
  31. connect_timeout=3000,
  32. read_timeout=500000,
  33. pool_maxsize=1000,
  34. pool_connections=1000
  35. )
  36. try:
  37. dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
  38. sql = f'select * from {project}.{table} where dt = {dt}'
  39. with odps.execute_sql(sql=sql).open_reader() as reader:
  40. data_count = reader.count
  41. except Exception as e:
  42. data_count = 0
  43. return data_count
  44. def get_feature_data(now_date, project, table):
  45. """获取特征数据"""
  46. dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
  47. # dt = '2022041310'
  48. records = get_data_from_odps(date=dt, project=project, table=table)
  49. feature_data = []
  50. for record in records:
  51. item = {}
  52. for feature_name in features:
  53. item[feature_name] = record[feature_name]
  54. feature_data.append(item)
  55. feature_df = pd.DataFrame(feature_data)
  56. return feature_df
  57. def cal_score(df):
  58. """
  59. 计算score
  60. :param df: 特征数据
  61. :return:
  62. """
  63. # score计算公式: sharerate*backrate*logback*ctr
  64. # sharerate = lastonehour_share/(lastonehour_play+1000)
  65. # backrate = lastonehour_return/(lastonehour_share+10)
  66. # ctr = lastonehour_play/(lastonehour_view+1000), 对ctr限最大值:K2 = 0.6 if ctr > 0.6 else ctr
  67. # score = sharerate * backrate * LOG(lastonehour_return+1) * K2
  68. df = df.fillna(0)
  69. df['share_rate'] = df['lastonehour_share'] / (df['lastonehour_play'] + 1000)
  70. df['back_rate'] = df['lastonehour_return'] / (df['lastonehour_share'] + 10)
  71. df['log_back'] = (df['lastonehour_return'] + 1).apply(math.log)
  72. df['ctr'] = df['lastonehour_play'] / (df['lastonehour_view'] + 1000)
  73. df['K2'] = df['ctr'].apply(lambda x: 0.6 if x > 0.6 else x)
  74. df['score'] = df['share_rate'] * df['back_rate'] * df['log_back'] * df['K2']
  75. df = df.sort_values(by=['score'], ascending=False)
  76. return df
  77. def video_rank(app_type, df, now_date, now_h, return_count):
  78. """
  79. 根据回流数量,对视频进行二次排序
  80. :param app_type:
  81. :param df:
  82. :param now_date:
  83. :param now_h:
  84. :param return_count: 小时级数据回流限制数
  85. :return:
  86. """
  87. log_.info(f'df length = {len(df)}')
  88. # 获取符合进入召回源条件的视频,进入条件:小时级回流>=20 && score>=0.005
  89. h_recall_df = df[(df['lastonehour_return'] >= return_count) & (df['score'] >= 0.005)]
  90. h_recall_videos = h_recall_df['videoid'].to_list()
  91. log_.info(f'h_recall videos count = {len(h_recall_videos)}')
  92. # 不符合进入召回源条件的视频
  93. df = df.append(h_recall_df)
  94. h_else_df = df.drop_duplicates(['videoid'], keep=False)
  95. h_else_df = h_else_df.sort_values(by=['score'], ascending=False)
  96. h_else_videos = h_else_df['videoid'].to_list
  97. # 合并,给定分数
  98. final_videos = h_recall_videos + h_else_videos
  99. final_result = {}
  100. step = round(100/len(final_videos), 3)
  101. for i, video_id in enumerate(final_videos):
  102. score = 100 - i * step
  103. final_result[int(video_id)] = score
  104. # 写入对应的redis
  105. key_name = \
  106. f"{config_.RECALL_KEY_NAME_PREFIX_APP_TYPE}{app_type}.{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
  107. if len(final_result) > 0:
  108. redis_helper = RedisHelper()
  109. redis_helper.add_data_with_zset(key_name=key_name, data=final_result, expire_time=23 * 3600)
  110. def rank_by_h(app_type, now_date, now_h, return_count_list, project, table):
  111. # 获取特征数据
  112. feature_df = get_feature_data(now_date=now_date, project=project, table=table)
  113. # 计算score
  114. score_df = cal_score(df=feature_df)
  115. # rank
  116. for cnt in return_count_list:
  117. log_.info(f"return_count = {cnt}")
  118. video_rank(app_type=app_type, df=score_df, now_date=now_date, now_h=now_h, return_count=cnt)
  119. # to-csv
  120. score_filename = f"score_{app_type}_{datetime.datetime.strftime(now_date, '%Y%m%d%H')}.csv"
  121. score_df.to_csv(f'./data/{score_filename}')
  122. def h_rank_bottom(app_type, now_date, now_h):
  123. """未按时更新数据,用上一小时结果作为当前小时的数据"""
  124. log_.info(f"app_type = {app_type}")
  125. # 获取rov模型结果
  126. redis_helper = RedisHelper()
  127. if now_h == 0:
  128. redis_dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
  129. redis_h = 23
  130. else:
  131. redis_dt = datetime.datetime.strftime(now_date, '%Y%m%d')
  132. redis_h = now_h - 1
  133. key_name = f"{config_.RECALL_KEY_NAME_PREFIX_APP_TYPE}{app_type}.{redis_dt}.{redis_h}"
  134. initial_data = redis_helper.get_data_zset_with_index(key_name=key_name, start=0, end=-1, with_scores=True)
  135. final_data = dict()
  136. for video_id, score in initial_data:
  137. final_data[video_id] = score
  138. # 存入对应的redis
  139. final_key_name = \
  140. f"{config_.RECALL_KEY_NAME_PREFIX_APP_TYPE}{app_type}.{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
  141. if len(final_data) > 0:
  142. redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=23 * 3600)
  143. def h_timer_check(app_type):
  144. log_.info(f"app_type = {app_type}")
  145. return_count_list = [20]
  146. now_date = datetime.datetime.today()
  147. log_.info(f"now_date: {datetime.datetime.strftime(now_date, '%Y%m%d%H')}")
  148. now_h = datetime.datetime.now().hour
  149. now_min = datetime.datetime.now().minute
  150. if now_h == 0:
  151. h_rank_bottom(app_type=app_type, now_date=now_date, now_h=now_h)
  152. return
  153. # 查看当前小时更新的数据是否已准备好
  154. project = config_.PREDICT_PROJECT_18_19[str(app_type)]
  155. table = config_.PREDICT_TABLE_18_19[str(app_type)]
  156. h_data_count = h_data_check(project=project, table=table, now_date=now_date)
  157. if h_data_count > 0:
  158. log_.info(f'h_data_count = {h_data_count}')
  159. # 数据准备好,进行更新
  160. rank_by_h(app_type=app_type, now_date=now_date, now_h=now_h,
  161. return_count_list=return_count_list, project=project, table=table)
  162. elif now_min > 50:
  163. log_.info('h_recall data is None, use bottom data!')
  164. h_rank_bottom(app_type=app_type, now_date=now_date, now_h=now_h)
  165. else:
  166. # 数据没准备好,1分钟后重新检查
  167. Timer(60, h_timer_check).start()
  168. if __name__ == '__main__':
  169. # df1 = get_feature_data()
  170. # res = cal_score(df=df1)
  171. # video_rank(df=res, now_date=datetime.datetime.today())
  172. # rank_by_h()
  173. app_type_list = [18, 19]
  174. for app_type in app_type_list:
  175. h_timer_check(app_type=app_type)