# alg_growth_gh_reply_video_v1.py
# -*- coding: utf-8 -*-
import json
import pandas as pd
import traceback
import odps
from odps import ODPS
from threading import Timer
from datetime import datetime, timedelta
from db_helper import MysqlHelper
from my_utils import check_table_partition_exits_v2, get_dataframe_from_odps, \
    get_odps_df_of_max_partition, get_odps_instance, get_odps_df_of_recent_partitions
from my_utils import request_post, send_msg_to_feishu
from my_config import set_config
import numpy as np
from log import Log
import os
from argparse import ArgumentParser
from constants import AutoReplyAccountType
CONFIG, _ = set_config()
LOGGER = Log()

BASE_GROUP_NAME = 'stg0909-base'
EXPLORE1_GROUP_NAME = 'stg0909-explore1'
EXPLORE2_GROUP_NAME = 'stg0909-explore2'
# initial account list; refreshed at runtime by get_and_update_gh_ids()
GH_IDS = ('gh_ac43e43b253b', 'gh_93e00e187787', 'gh_77f36c109fb1',
          'gh_68e7fdc09fe4', 'gh_b181786a6c8c')
CDN_IMG_OPERATOR = "?x-oss-process=image/resize,m_fill,w_600,h_480,limit_0/format,jpg/watermark,image_eXNoL3BpYy93YXRlcm1hcmtlci9pY29uX3BsYXlfd2hpdGUucG5nP3gtb3NzLXByb2Nlc3M9aW1hZ2UvcmVzaXplLHdfMTQ0,g_center"

ODS_PROJECT = "loghubods"
EXPLORE_POOL_TABLE = 'alg_growth_video_return_stats_history'
GH_REPLY_STATS_TABLE = 'alg_growth_gh_reply_video_stats'
GH_REPLY_STATS_HOUR_TABLE = 'alg_growth_gh_reply_video_stats_hour'
ODPS_RANK_RESULT_TABLE = 'alg_gh_autoreply_video_rank_data'
RDS_RANK_RESULT_TABLE = 'alg_gh_autoreply_video_rank_data'
GH_DETAIL = 'gh_detail'
STATS_PERIOD_DAYS = 5
STATS_PERIOD_DAYS_FOR_QUIT = 30
SEND_N = 2  # number of videos selected per account per strategy group

pd.set_option('display.max_rows', None)


def get_and_update_gh_ids(run_dt):
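    """Fetch the self-owned gh accounts for run_dt from the gh_detail table and refresh the global GH_IDS.

    A virtual 'default' account is appended if missing, so that account-agnostic
    (aggregated) strategies always have a target row.
    """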
    gh = get_odps_df_of_max_partition(ODS_PROJECT, GH_DETAIL, {'dt': run_dt})
    gh = gh.to_pandas()
    gh = gh[gh['type'] == AutoReplyAccountType.SELF_OWNED_GZH.value]
    # handle the 'default' account separately
    if 'default' not in gh['gh_id'].values:
        new_row = pd.DataFrame({'gh_id': ['default'], 'gh_name': ['默认'], 'type': [2], 'category1': ['泛生活']},
                               index=[0])
        gh = pd.concat([gh, new_row], ignore_index=True)
    gh = gh.drop_duplicates(subset=['gh_id'])
    global GH_IDS
    GH_IDS = tuple(gh['gh_id'])
    return gh


def check_data_partition(project, table, data_dt, data_hr=None):
    """Check whether the data partition is ready; return its row count (0 if absent or on error)."""
    try:
        partition_spec = {'dt': data_dt}
        if data_hr:
            partition_spec['hour'] = data_hr
        part_exist, data_count = check_table_partition_exits_v2(
            project, table, partition_spec)
    except Exception as e:
        data_count = 0
    return data_count


def get_last_strategy_result(project, rank_table, dt_version, key):
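    """Load the most recent published strategy result from rank_table (by ctime partition)
    and return de-duplicated (gh_id, video_id, strategy_key, sort) rows for the given key."""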
    strategy_df = get_odps_df_of_max_partition(
        project, rank_table, {'ctime': dt_version}
    ).to_pandas()
    sub_df = strategy_df.query(f'strategy_key == "{key}"')
    sub_df = sub_df[['gh_id', 'video_id', 'strategy_key', 'sort']].drop_duplicates()
    return sub_df


def process_reply_stats(project, daily_table, hourly_table, period, run_dt, run_hour):
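    """Aggregate per-(gh_id, video_id) reply stats over the recent `period` daily partitions plus
    the current hourly partition, add an account-agnostic copy under gh_id='default', and compute
    score = day0_return / (send_count + 500); the +500 acts as additive smoothing against small
    send counts."""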
    # fetch multi-day stats plus the current day's same-day-return (hourly) stats for aggregation
    df = get_odps_df_of_recent_partitions(
        project, daily_table, period, {'dt': run_dt}).to_pandas()
    hour_data_version = f'{run_dt}{run_hour}'
    hourly_df = get_odps_df_of_recent_partitions(
        project, hourly_table, 1, {'dt': hour_data_version}).to_pandas()
    df = pd.concat([df, hourly_df]).reset_index(drop=True)
    df['video_id'] = df['video_id'].astype('int64')
    df = df[['gh_id', 'video_id', 'send_count', 'first_visit_uv', 'day0_return']]

    # aggregate within each account
    df = df.groupby(['video_id', 'gh_id']).agg({
        'send_count': 'sum',
        'first_visit_uv': 'sum',
        'day0_return': 'sum'
    }).reset_index()

    # aggregate across all accounts and keep the result as the 'default' account
    default_stats_df = df.groupby('video_id').agg({
        'send_count': 'sum',
        'first_visit_uv': 'sum',
        'day0_return': 'sum'
    }).reset_index()
    default_stats_df['gh_id'] = 'default'

    merged_df = pd.concat([df, default_stats_df]).reset_index(drop=True)
    merged_df['score'] = merged_df['day0_return'] / (merged_df['send_count'] + 500)
    return merged_df


def rank_for_layer1(run_dt, run_hour, project, table, gh_df):
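    """Layer-1 exploration (stg0909-explore1): pick SEND_N videos per category1 from the explore pool.

    Videos whose 30-day open_rate and return_by_send both fall below the 20th percentile are
    dropped first (only videos with enough sends are judged); the remaining videos are sampled
    per category with probability proportional to their 'ros' score, then joined to accounts
    via gh_df on category1.
    """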
    # TODO: add content review
    df = get_odps_df_of_max_partition(project, table, {'dt': run_dt})
    df = df.to_pandas()

    # use long-run statistics to quit low-efficiency videos
    stats_df = get_odps_df_of_recent_partitions(
        ODS_PROJECT, GH_REPLY_STATS_TABLE, STATS_PERIOD_DAYS_FOR_QUIT, {'dt': run_dt}).to_pandas()
    stats_df['video_id'] = stats_df['video_id'].astype('int64')
    stats_df = stats_df[['video_id', 'send_count', 'first_visit_uv', 'day0_return']]
    stats_df = stats_df.groupby(['video_id']).agg({
        'send_count': 'sum',
        'first_visit_uv': 'sum',
        'day0_return': 'sum'
    })
    # no smoothing added to the denominator here
    stats_df['return_by_send'] = stats_df['day0_return'] / (stats_df['send_count'])
    stats_df['open_rate'] = stats_df['first_visit_uv'] / (stats_df['send_count'])
    # keep only videos with enough sends, so videos lacking data are never filtered out below
    stats_df = stats_df.query('send_count > 1000')
    df = df.merge(stats_df, on='video_id', how='left')

    open_rate_threshold = df.open_rate.quantile(q=0.2)
    return_by_send_threshold = df.return_by_send.quantile(q=0.2)
    filter_condition = 'open_rate < {} and return_by_send < {}' \
        .format(open_rate_threshold, return_by_send_threshold)
    filter_rows = df.query(filter_condition)
    df = df.drop(filter_rows.index)
    print("low-efficiency videos to quit:")
    print(filter_rows[['video_id', 'title', 'send_count', 'open_rate', 'return_by_send']])

    # seed with dt_version so reruns give identical results
    dt_version = f'{run_dt}{run_hour}'
    np.random.seed(int(dt_version) + 1)

    # TODO: revise the weighting strategy
    df['score'] = df['ros']
    # weighted random sampling within each category1 group
    sampled_df = df.groupby('category1').apply(
        lambda x: x.sample(n=SEND_N, weights=x['score'], replace=False)).reset_index(drop=True)
    sampled_df['sort'] = sampled_df.groupby('category1')['score'].rank(method='first', ascending=False).astype(int)
    sampled_df['strategy_key'] = EXPLORE1_GROUP_NAME
    sampled_df['dt_version'] = dt_version
    extend_df = sampled_df.merge(gh_df, on='category1')
    result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
    return result_df


def rank_for_layer2(run_dt, run_hour, rank_table):
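    """Layer-2 exploration (stg0909-explore2): within each account, sample SEND_N videos weighted
    by the smoothed return score from process_reply_stats.

    The 'default' account is sampled from the account-agnostic stats; other accounts require
    day0_return > 100 and at least SEND_N candidate rows, otherwise they fall back to sampling
    from the current base strategy result.
    """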
    stats_df = process_reply_stats(
        ODS_PROJECT, GH_REPLY_STATS_TABLE, GH_REPLY_STATS_HOUR_TABLE,
        STATS_PERIOD_DAYS, run_dt, run_hour)

    # seed with dt_version so reruns give identical results
    dt_version = f'{run_dt}{run_hour}'
    np.random.seed(int(dt_version) + 1)

    # TODO: compute cross-account correlation
    ## for each pair of accounts, take the intersection of videos that have RoVn values,
    ## build a vector from each account's (smoothed) RoVn and compute the correlation
    ## coefficient or cosine similarity, then take a weighted sum of RoVn per video
    # current basic version: the layer-2 exploration score is computed within each account only
    sampled_dfs = []

    # handle the 'default' account (default-explore2)
    default_stats_df = stats_df.query('gh_id == "default"')
    sampled_df = default_stats_df.sample(n=SEND_N, weights=default_stats_df['score'])
    sampled_df['sort'] = range(1, len(sampled_df) + 1)
    sampled_dfs.append(sampled_df)

    # basic per-account filter
    df = stats_df.query('day0_return > 100')
    # fallback to base if necessary
    base_strategy_df = get_last_strategy_result(
        ODS_PROJECT, rank_table, dt_version, BASE_GROUP_NAME)
    for gh_id in GH_IDS:
        if gh_id == 'default':
            continue
        sub_df = df.query(f'gh_id == "{gh_id}"')
        if len(sub_df) < SEND_N:
            LOGGER.warning(
                "gh_id[{}] rows[{}] not enough for layer2, fallback to base"
                .format(gh_id, len(sub_df)))
            sub_df = base_strategy_df.query(f'gh_id == "{gh_id}"')
            sub_df['score'] = sub_df['sort']
        sampled_df = sub_df.sample(n=SEND_N, weights=sub_df['score'])
        sampled_df['sort'] = range(1, len(sampled_df) + 1)
        sampled_dfs.append(sampled_df)

    extend_df = pd.concat(sampled_dfs)
    extend_df['strategy_key'] = EXPLORE2_GROUP_NAME
    extend_df['dt_version'] = dt_version
    result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
    return result_df


def rank_for_base(run_dt, run_hour, rank_table):
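    """Base (exploitation) strategy (stg0909-base): for non-default accounts, keep candidates
    that are either already in the current base strategy or have score > 0.1; then, per account
    (including 'default'), take the top SEND_N videos by score."""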
    stats_df = process_reply_stats(
        ODS_PROJECT, GH_REPLY_STATS_TABLE, GH_REPLY_STATS_HOUR_TABLE,
        STATS_PERIOD_DAYS, run_dt, run_hour)

    # TODO: support setting the base manually
    dt_version = f'{run_dt}{run_hour}'

    # fetch the current base strategy; the rank table's dt_version (ctime partition) uses the current time
    base_strategy_df = get_last_strategy_result(
        ODS_PROJECT, rank_table, dt_version, BASE_GROUP_NAME)

    default_stats_df = stats_df.query('gh_id == "default"')

    # rank within each account (including default) to decide its base (exploitation) content;
    # join before filtering, so the current base strategy always takes part in the ranking
    non_default_ids = list(filter(lambda x: x != 'default', GH_IDS))
    gh_ids_str = ','.join(f'"{x}"' for x in non_default_ids)
    stats_df = stats_df.query(f'gh_id in ({gh_ids_str})')
    stats_with_strategy_df = stats_df \
        .merge(
            base_strategy_df,
            on=['gh_id', 'video_id'],
            how='left') \
        .query('strategy_key.notna() or score > 0.1')

    # merge the default and per-account data
    grouped_stats_df = pd.concat([default_stats_df, stats_with_strategy_df]).reset_index()

    def set_top_n(group, n=2):
        group_sorted = group.sort_values(by='score', ascending=False)
        top_n = group_sorted.head(n)
        # tolerate groups smaller than n; check_result_data will catch shortfalls later
        top_n['sort'] = range(1, len(top_n) + 1)
        return top_n

    ranked_df = grouped_stats_df.groupby('gh_id').apply(set_top_n, SEND_N)
    ranked_df = ranked_df.reset_index(drop=True)
    # ranked_df['sort'] = grouped_stats_df.groupby('gh_id')['score'].rank(ascending=False)
    ranked_df['strategy_key'] = BASE_GROUP_NAME
    ranked_df['dt_version'] = dt_version
    ranked_df = ranked_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
    return ranked_df


def check_result_data(df):
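    """Raise if any (gh_id, strategy_key) combination does not have exactly SEND_N rows."""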
    for gh_id in GH_IDS:
        for key in (EXPLORE1_GROUP_NAME, EXPLORE2_GROUP_NAME, BASE_GROUP_NAME):
            sub_df = df.query(f'gh_id == "{gh_id}" and strategy_key == "{key}"')
            if len(sub_df) != SEND_N:
                raise Exception(f"Result not enough for gh_id[{gh_id}] group[{key}]")


def postprocess_override_by_config(df, dt_version):
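    """Apply manual overrides from configs/gh_reply_video.json: for every configured
    (gh_id, strategy_key, position), drop the algorithm's pick at that position and
    append the configured video_id with score 0.0."""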
    config = json.load(open("configs/gh_reply_video.json"))
    override_data = {
        'strategy_key': [],
        'gh_id': [],
        'sort': [],
        'video_id': []
    }
    for gh_id in config:
        gh_config = config[gh_id]
        for key in gh_config:
            for video_config in gh_config[key]:
                # remove the current pick at this position
                position = video_config['position']
                video_id = video_config['video_id']
                df = df.drop(df.query(f'gh_id == "{gh_id}" and strategy_key == "{key}" and sort == {position}').index)
                override_data['strategy_key'].append(key)
                override_data['gh_id'].append(gh_id)
                override_data['sort'].append(position)
                override_data['video_id'].append(video_id)
    n_records = len(override_data['strategy_key'])
    override_data['dt_version'] = [dt_version] * n_records
    override_data['score'] = [0.0] * n_records
    df_to_append = pd.DataFrame(override_data)
    df = pd.concat([df, df_to_append], ignore_index=True)
    return df


def build_and_transfer_data(run_dt, run_hour, project, **kwargs):
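    """Build the rank results of all three strategy groups, apply config overrides and sanity
    checks, join video title/cover from videoods.wx_video, then publish: write a partition of
    ODPS_RANK_RESULT_TABLE and sync the rows to MySQL, soft-deleting older rows of the same
    dt_version. With dry_run=True the result is only printed."""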
    dt_version = f'{run_dt}{run_hour}'
    dry_run = kwargs.get('dry_run', False)

    next_dt = (datetime.strptime(run_dt, "%Y%m%d") + timedelta(1)).strftime("%Y%m%d")
    gh_df = get_and_update_gh_ids(next_dt)

    layer1_rank = rank_for_layer1(run_dt, run_hour, ODS_PROJECT, EXPLORE_POOL_TABLE, gh_df)
    layer2_rank = rank_for_layer2(run_dt, run_hour, ODPS_RANK_RESULT_TABLE)
    base_rank = rank_for_base(run_dt, run_hour, ODPS_RANK_RESULT_TABLE)
    final_rank_df = pd.concat([layer1_rank, layer2_rank, base_rank]).reset_index(drop=True)
    final_rank_df = postprocess_override_by_config(final_rank_df, dt_version)
    check_result_data(final_rank_df)

    odps_instance = get_odps_instance(project)
    odps_ranked_df = odps.DataFrame(final_rank_df)

    video_df = get_dataframe_from_odps('videoods', 'wx_video')
    video_df['cover_url'] = video_df['cover_img_path'] + CDN_IMG_OPERATOR
    video_df = video_df['id', 'title', 'cover_url']
    final_df = odps_ranked_df.join(video_df, on=('video_id', 'id'))
    final_df = final_df.to_pandas()
    final_df = final_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'title', 'cover_url', 'score']]

    # reverse sending order: sort 1 (best) becomes SEND_N, sort SEND_N becomes 1
    final_df['sort'] = SEND_N + 1 - final_df['sort']

    if dry_run:
        print(final_df[['strategy_key', 'gh_id', 'sort', 'video_id', 'score', 'title']]
              .sort_values(by=['strategy_key', 'gh_id', 'sort']))
        return

    # save to ODPS
    t = odps_instance.get_table(ODPS_RANK_RESULT_TABLE)
    part_spec_dict = {'dt': run_dt, 'hour': run_hour, 'ctime': dt_version}
    part_spec = ','.join(['{}={}'.format(k, part_spec_dict[k]) for k in part_spec_dict.keys()])
    with t.open_writer(partition=part_spec, create_partition=True, overwrite=True) as writer:
        writer.write(list(final_df.itertuples(index=False)))

    # sync to MySQL
    data_to_insert = [tuple(row) for row in final_df.itertuples(index=False)]
    data_columns = list(final_df.columns)
    # cutoff for soft-deleting older rows of this dt_version (assumed to be the time just before this insert)
    max_time_to_delete = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    mysql = MysqlHelper(CONFIG.MYSQL_GROWTH_INFO)
    mysql.batch_insert(RDS_RANK_RESULT_TABLE, data_to_insert, data_columns)

    # mark older rows of the same version as deleted
    for key in final_df['strategy_key'].unique():
        sql = f"""
            update {RDS_RANK_RESULT_TABLE}
            set is_delete = 1
            where
                dt_version = '{dt_version}'
                and strategy_key = '{key}'
                and create_time < '{max_time_to_delete}'
                and is_delete = 0
        """
        rows = mysql.execute(sql)


def main_loop():
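    """Entry point: wait until the previous day's upstream stats partition is ready, then build and
    publish the rank data; if it is not ready, retry every 60 seconds via a Timer. Supports
    --dry-run and --run-at yyyyMMddHH for reruns. Failures outside the dev environment are
    reported to Feishu."""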
    argparser = ArgumentParser()
    argparser.add_argument('-n', '--dry-run', action='store_true')
    argparser.add_argument('--run-at', help='assume to run at date and hour, yyyyMMddHH')
    args = argparser.parse_args()
    run_date = datetime.today()
    if args.run_at:
        run_date = datetime.strptime(args.run_at, "%Y%m%d%H")
        LOGGER.info(f"Assume to run at {run_date.strftime('%Y-%m-%d %H:00')}")

    try:
        now_date = datetime.today()
        LOGGER.info(f"开始执行: {datetime.strftime(now_date, '%Y-%m-%d %H:%M')}")
        last_date = run_date - timedelta(1)
        last_dt = last_date.strftime("%Y%m%d")

        # check whether the daily-updated upstream data is ready;
        # the upstream stats table is currently updated daily, but its schema is hour-compatible
        h_data_count = check_data_partition(ODS_PROJECT, GH_REPLY_STATS_TABLE, last_dt, '00')
        if h_data_count > 0:
            LOGGER.info('上游数据表查询数据条数={},开始计算'.format(h_data_count))
            run_dt = run_date.strftime("%Y%m%d")
            run_hour = run_date.strftime("%H")
            LOGGER.info(f'run_dt: {run_dt}, run_hour: {run_hour}')
            build_and_transfer_data(run_dt, run_hour, ODS_PROJECT,
                                    dry_run=args.dry_run)
            LOGGER.info('数据更新完成')
        else:
            LOGGER.info("上游数据未就绪,等待60s")
            Timer(60, main_loop).start()
            return
    except Exception as e:
        LOGGER.error(f"数据更新失败, exception: {e}, traceback: {traceback.format_exc()}")
        if CONFIG.ENV_TEXT == '开发环境':
            return
        send_msg_to_feishu(
            webhook=CONFIG.FEISHU_ROBOT['growth_task_robot'].get('webhook'),
            key_word=CONFIG.FEISHU_ROBOT['growth_task_robot'].get('key_word'),
            msg_text=f"rov-offline{CONFIG.ENV_TEXT} - 数据更新失败\n"
                     f"exception: {e}\n"
                     f"traceback: {traceback.format_exc()}"
        )


if __name__ == '__main__':
    LOGGER.info("%s 开始执行" % os.path.basename(__file__))
    LOGGER.info(f"environment: {CONFIG.ENV_TEXT}")
    main_loop()