# Source: rule_rank_h_18_19.py (8.7 KB)
  1. # -*- coding: utf-8 -*-
  2. # @ModuleName: rule_rank_h_18_19
  3. # @Author: Liqian
  4. # @Time: 2022/4/21 下午4:31
  5. # @Software: PyCharm
  6. import datetime
  7. import pandas as pd
  8. import math
  9. from odps import ODPS
  10. from threading import Timer
  11. from get_data import get_data_from_odps
  12. from db_helper import RedisHelper
  13. from config import set_config
  14. from log import Log
# Module-level helpers: project config and logger used by every function below.
config_, _ = set_config()
log_ = Log()
# Feature columns read from the hourly ODPS table.
features = [
    'videoid',
    'lastonehour_view',    # exposures in the past hour
    'lastonehour_play',    # plays in the past hour
    'lastonehour_share',   # shares in the past hour
    'lastonehour_return',  # returns (traffic brought back by shares) in the past hour
]
  24. def h_data_check(project, table, now_date):
  25. """检查数据是否准备好"""
  26. odps = ODPS(
  27. access_id=config_.ODPS_CONFIG['ACCESSID'],
  28. secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
  29. project=project,
  30. endpoint=config_.ODPS_CONFIG['ENDPOINT'],
  31. connect_timeout=3000,
  32. read_timeout=500000,
  33. pool_maxsize=1000,
  34. pool_connections=1000
  35. )
  36. try:
  37. dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
  38. sql = f'select * from {project}.{table} where dt = {dt}'
  39. with odps.execute_sql(sql=sql).open_reader() as reader:
  40. data_count = reader.count
  41. except Exception as e:
  42. data_count = 0
  43. return data_count
  44. def get_feature_data(now_date, project, table):
  45. """获取特征数据"""
  46. dt = datetime.datetime.strftime(now_date, '%Y%m%d%H')
  47. # dt = '2022041310'
  48. records = get_data_from_odps(date=dt, project=project, table=table)
  49. feature_data = []
  50. for record in records:
  51. item = {}
  52. for feature_name in features:
  53. item[feature_name] = record[feature_name]
  54. feature_data.append(item)
  55. feature_df = pd.DataFrame(feature_data)
  56. return feature_df
  57. def cal_score(df):
  58. """
  59. 计算score
  60. :param df: 特征数据
  61. :return:
  62. """
  63. # score计算公式: sharerate*backrate*logback*ctr
  64. # sharerate = lastonehour_share/(lastonehour_play+1000)
  65. # backrate = lastonehour_return/(lastonehour_share+10)
  66. # ctr = lastonehour_play/(lastonehour_view+1000), 对ctr限最大值:K2 = 0.6 if ctr > 0.6 else ctr
  67. # score = sharerate * backrate * LOG(lastonehour_return+1) * K2
  68. df = df.fillna(0)
  69. df['share_rate'] = df['lastonehour_share'] / (df['lastonehour_play'] + 1000)
  70. df['back_rate'] = df['lastonehour_return'] / (df['lastonehour_share'] + 10)
  71. df['log_back'] = (df['lastonehour_return'] + 1).apply(math.log)
  72. df['ctr'] = df['lastonehour_play'] / (df['lastonehour_view'] + 1000)
  73. df['K2'] = df['ctr'].apply(lambda x: 0.6 if x > 0.6 else x)
  74. df['score'] = df['share_rate'] * df['back_rate'] * df['log_back'] * df['K2']
  75. df = df.sort_values(by=['score'], ascending=False)
  76. return df
  77. def video_rank(df, now_date, now_h, return_count):
  78. """
  79. 根据回流数量,对视频进行二次排序
  80. :param df:
  81. :param now_date:
  82. :param now_h:
  83. :param return_count: 小时级数据回流限制数
  84. :return:
  85. """
  86. log_.info(f'df length = {len(df)}')
  87. # 获取符合进入召回源条件的视频,进入条件:小时级回流>=20 && score>=0.005
  88. h_recall_df = df[(df['lastonehour_return'] >= return_count) & (df['score'] >= 0.005)]
  89. h_recall_videos = h_recall_df['videoid'].to_list()
  90. log_.info(f'h_recall videos count = {len(h_recall_videos)}')
  91. # 不符合进入召回源条件的视频
  92. df = df.append(h_recall_df)
  93. h_else_df = df.drop_duplicates(['videoid'], keep=False)
  94. h_else_df = h_else_df.sort_values(by=['score'], ascending=False)
  95. h_else_videos = h_else_df['videoid'].to_list
  96. # 合并,给定分数
  97. final_videos = h_recall_videos + h_else_videos
  98. for i, video_id in enumerate(final_videos):
  99. # 写入对应的redis
  100. h_video_ids =[]
  101. h_recall_result = {}
  102. for video_id in h_recall_videos:
  103. score = h_recall_df[h_recall_df['videoid'] == video_id]['score']
  104. h_recall_result[int(video_id)] = float(score)
  105. h_video_ids.append(int(video_id))
  106. h_recall_key_name = \
  107. f"{config_.RECALL_KEY_NAME_PREFIX_BY_H}{return_count}.{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
  108. if len(h_recall_result) > 0:
  109. redis_helper.add_data_with_zset(key_name=h_recall_key_name, data=h_recall_result, expire_time=23 * 3600)
  110. # 清空线上过滤应用列表
  111. redis_helper.del_keys(key_name=f"{config_.H_VIDEO_FILER}{return_count}")
  112. # 去重更新rov模型结果,并另存为redis中
  113. initial_data_dup = {}
  114. for video_id, score in initial_data:
  115. if int(video_id) not in h_video_ids:
  116. initial_data_dup[int(video_id)] = score
  117. log_.info(f"initial data dup count = {len(initial_data_dup)}")
  118. initial_key_name = \
  119. f"{config_.RECALL_KEY_NAME_PREFIX_DUP_H}{return_count}.{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
  120. if len(initial_data_dup) > 0:
  121. redis_helper.add_data_with_zset(key_name=initial_key_name, data=initial_data_dup, expire_time=23 * 3600)
  122. # # 去重合并
  123. # final_videos = [int(item) for item in h_recall_videos]
  124. # temp_videos = [int(video_id) for video_id, _ in initial_data if int(video_id) not in final_videos]
  125. # final_videos = final_videos + temp_videos
  126. # log_.info(f'final videos count = {len(final_videos)}')
  127. #
  128. # # 重新给定score
  129. # final_data = {}
  130. # for i, video_id in enumerate(final_videos):
  131. # score = 100 - i * config_.ROV_SCORE_D
  132. # final_data[video_id] = score
  133. #
  134. # # 存入对应的redis
  135. # final_key_name = f"{config_.RECALL_KEY_NAME_PREFIX_BY_H}{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
  136. # redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=24 * 3600)
  137. def rank_by_h(now_date, now_h, return_count_list):
  138. # 获取特征数据
  139. feature_df = get_feature_data(now_date=now_date)
  140. # 计算score
  141. score_df = cal_score(df=feature_df)
  142. # rank
  143. for cnt in return_count_list:
  144. log_.info(f"return_count = {cnt}")
  145. video_rank(df=score_df, now_date=now_date, now_h=now_h, return_count=cnt)
  146. # to-csv
  147. score_filename = f"score_{datetime.datetime.strftime(now_date, '%Y%m%d%H')}.csv"
  148. score_df.to_csv(f'./data/{score_filename}')
def h_rank_bottom(now_date, now_h, return_count):
    """
    Fallback when the hourly data is not updated in time: copy the previous
    hour's redis results to the current hour's keys.

    :param now_date: datetime of the current run
    :param now_h: current hour (0-23)
    :param return_count: hourly return-count threshold (part of the key suffix)
    """
    log_.info(f"return_count = {return_count}")
    # Read results from the previous hour (previous day's hour 23 when now_h == 0).
    redis_helper = RedisHelper()
    if now_h == 0:
        redis_dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
        redis_h = 23
    else:
        redis_dt = datetime.datetime.strftime(now_date, '%Y%m%d')
        redis_h = now_h - 1
    # Copy both the recall key and the dedup key forward.
    key_prefix_list = [config_.RECALL_KEY_NAME_PREFIX_BY_H, config_.RECALL_KEY_NAME_PREFIX_DUP_H]
    for key_prefix in key_prefix_list:
        key_name = f"{key_prefix}{return_count}.{redis_dt}.{redis_h}"
        initial_data = redis_helper.get_data_zset_with_index(key_name=key_name, start=0, end=-1, with_scores=True)
        final_data = dict()
        for video_id, score in initial_data:
            final_data[video_id] = score
        # Store under the current hour's key.
        final_key_name = \
            f"{key_prefix}{return_count}.{datetime.datetime.strftime(now_date, '%Y%m%d')}.{now_h}"
        if len(final_data) > 0:
            redis_helper.add_data_with_zset(key_name=final_key_name, data=final_data, expire_time=23 * 3600)
        # Clear the online filter list.
        # NOTE(review): original indentation was lost — assumed inside the loop;
        # the key does not depend on key_prefix, so deleting twice is harmless.
        redis_helper.del_keys(key_name=f"{config_.H_VIDEO_FILER}{return_count}")
def h_timer_check():
    """
    Entry point: check whether the current hour's data is ready and run the
    ranking update; re-check every minute, falling back to the previous
    hour's results after minute 50.
    """
    return_count_list = [20, 10]
    now_date = datetime.datetime.today()
    log_.info(f"now_date: {datetime.datetime.strftime(now_date, '%Y%m%d%H')}")
    now_h = datetime.datetime.now().hour
    now_min = datetime.datetime.now().minute
    if now_h == 0:
        # At hour 0 there is no fresh data yet — always copy forward.
        for cnt in return_count_list:
            h_rank_bottom(now_date=now_date, now_h=now_h, return_count=cnt)
        return
    # Check whether the current hour's data is ready.
    # NOTE(review): `project` and `table` are not defined anywhere in this
    # file — presumably module-level constants lost from this copy; confirm
    # and restore them, otherwise this line raises NameError.
    h_data_count = h_data_check(project=project, table=table, now_date=now_date)
    if h_data_count > 0:
        log_.info(f'h_data_count = {h_data_count}')
        # Data is ready — run the update.
        rank_by_h(now_date=now_date, now_h=now_h, return_count_list=return_count_list)
    elif now_min > 50:
        log_.info('h_recall data is None, use bottom data!')
        for cnt in return_count_list:
            h_rank_bottom(now_date=now_date, now_h=now_h, return_count=cnt)
    else:
        # Data not ready — re-check in 1 minute.
        Timer(60, h_timer_check).start()
  197. if __name__ == '__main__':
  198. # df1 = get_feature_data()
  199. # res = cal_score(df=df1)
  200. # video_rank(df=res, now_date=datetime.datetime.today())
  201. # rank_by_h()
  202. h_timer_check()