
Add WeCom strategy data

xueyiming 6 months ago
parent
commit
37d89144de
1 changed file with 346 additions and 0 deletions

+ 346 - 0
alg_growth_we_com_reply_video_v1.py

@@ -0,0 +1,346 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import traceback
+import odps
+from threading import Timer
+from datetime import datetime, timedelta
+from db_helper import MysqlHelper
+from my_utils import check_table_partition_exits_v2, get_dataframe_from_odps, \
+    get_odps_df_of_max_partition, get_odps_instance, \
+    get_odps_df_of_recent_partitions, send_msg_to_feishu
+from my_config import set_config
+import numpy as np
+from log import Log
+import os
+from argparse import ArgumentParser
+
+CONFIG, _ = set_config()
+LOGGER = Log()
+
+BASE_GROUP_NAME = 'we-com-base'
+EXPLORE1_GROUP_NAME = 'we-com-explore1'
+EXPLORE2_GROUP_NAME = 'we-com-explore2'
+# TODO: fetch gh_id from external data source
+GH_IDS = ('SongYi', 'XinYi', '17512006748')
+
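+# OSS-style image-processing suffix appended to cover image paths (resize + play-icon watermark)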
+CDN_IMG_OPERATOR = "?x-oss-process=image/resize,m_fill,w_600,h_480,limit_0/format,jpg/watermark,image_eXNoL3BpYy93YXRlcm1hcmtlci9pY29uX3BsYXlfd2hpdGUucG5nP3gtb3NzLXByb2Nlc3M9aW1hZ2UvcmVzaXplLHdfMTQ0,g_center"
+
+ODS_PROJECT = "loghubods"
+EXPLORE_POOL_TABLE = 'alg_growth_video_return_stats_history'
+GH_REPLY_STATS_TABLE = 'alg_growth_3rd_gh_reply_video_stats'
+# ODPS_RANK_RESULT_TABLE = 'alg_gh_autoreply_video_rank_data'
+ODPS_WE_COM_RANK_RESULT_TABLE = 'alg_we_com_autoreply_video_rank_data'
+RDS_RANK_RESULT_TABLE = 'alg_gh_autoreply_video_rank_data'
+STATS_PERIOD_DAYS = 5  # days of reply stats to aggregate
+SEND_N = 2  # videos selected per account for each strategy group
+
+
+def check_data_partition(project, table, data_dt, data_hr=None):
+    """检查数据是否准备好"""
+    try:
+        partition_spec = {'dt': data_dt}
+        if data_hr:
+            partition_spec['hour'] = data_hr
+        part_exist, data_count = check_table_partition_exits_v2(
+            project, table, partition_spec)
+    except Exception as e:
+        # Treat any lookup failure as data-not-ready
+        LOGGER.error(f"check partition failed: {e}")
+        data_count = 0
+    return data_count
+
+
+def get_last_strategy_result(project, rank_table, dt_version, key):
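+    """Load the latest (max ctime partition) strategy result rows for the given strategy key."""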
+    strategy_df = get_odps_df_of_max_partition(
+        project, rank_table, {'ctime': dt_version}
+    ).to_pandas()
+    sub_df = strategy_df.query(f'strategy_key == "{key}"')
+    sub_df = sub_df[['gh_id', 'video_id', 'strategy_key', 'sort']].drop_duplicates()
+    return sub_df
+
+
+def process_reply_stats(project, table, period, run_dt):
+    # Fetch multiple days of send/visit/day-0-return stats for aggregation
+    df = get_odps_df_of_recent_partitions(project, table, period, {'dt': run_dt})
+    df = df.to_pandas()
+
+    df['video_id'] = df['video_id'].astype('int64')
+    df = df[['gh_id', 'video_id', 'send_count', 'first_visit_uv', 'day0_return']]
+
+    # Aggregate within each account
+    df = df.groupby(['video_id', 'gh_id']).agg({
+        'send_count': 'sum',
+        'first_visit_uv': 'sum',
+        'day0_return': 'sum'
+    }).reset_index()
+
+    # Aggregate over all accounts to build the 'default' entry
+    default_stats_df = df.groupby('video_id').agg({
+        'send_count': 'sum',
+        'first_visit_uv': 'sum',
+        'day0_return': 'sum'
+    }).reset_index()
+    default_stats_df['gh_id'] = 'default'
+
+    merged_df = pd.concat([df, default_stats_df]).reset_index(drop=True)
+
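+    # Smoothed conversion score: the +500 pseudo send count damps scores for low-volume videos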
+    merged_df['score'] = merged_df['day0_return'] / (merged_df['send_count'] + 500)
+    return merged_df
+
+
+def rank_for_layer1(run_dt, run_hour, project, table):
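+    """Layer-1 exploration: weighted-sample SEND_N videos from the history pool and fan them out to every account."""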
+    # TODO: 加审核&退场
+    df = get_odps_df_of_max_partition(project, table, {'dt': run_dt})
+    df = df.to_pandas()
+    # Seed with dt_version so re-runs produce identical results
+    dt_version = f'{run_dt}{run_hour}'
+    np.random.seed(int(dt_version) + 1)
+
+    # TODO: revise the weight calculation strategy
+    df['score'] = df['rov']
+
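+    # Weighted sampling without replacement, using score as the weight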
+    sampled_df = df.sample(n=SEND_N, weights=df['score'])
+    sampled_df['sort'] = range(1, len(sampled_df) + 1)
+    sampled_df['strategy_key'] = EXPLORE1_GROUP_NAME
+    sampled_df['dt_version'] = dt_version
+
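+    # Cross join via a constant temp key: replicate the sampled videos for every gh_id plus 'default'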
+    gh_name_df = pd.DataFrame({'gh_id': GH_IDS + ('default',)})
+    sampled_df['_tmpkey'] = 1
+    gh_name_df['_tmpkey'] = 1
+    extend_df = sampled_df.merge(gh_name_df, on='_tmpkey').drop('_tmpkey', axis=1)
+
+    result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
+    return result_df
+
+
+def rank_for_layer2(run_dt, run_hour, project, stats_table, rank_table):
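+    """Layer-2 exploration: weighted-sample SEND_N videos per account from recent reply stats, falling back to base when data is insufficient."""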
+    stats_df = process_reply_stats(project, stats_table, STATS_PERIOD_DAYS, run_dt)
+
+    # Seed with dt_version so re-runs produce identical results
+    dt_version = f'{run_dt}{run_hour}'
+    np.random.seed(int(dt_version) + 1)
+    # TODO: compute inter-account correlation
+    ## For each pair of accounts, take the intersection of videos that have RoVn values;
+    ## the (smoothed) RoVn values within an account form a vector.
+    ## Compute the correlation coefficient or cosine similarity between vectors,
+    ## then take the weighted sum of RoVn per video.
+    # Current implementation is the basic version: layer-2 exploration scores are computed within each account only
+
+    sampled_dfs = []
+    # Handle the default account (default-explore2)
+    default_stats_df = stats_df.query('gh_id == "default"')
+    sampled_df = default_stats_df.sample(n=SEND_N, weights=default_stats_df['score'])
+    sampled_df['sort'] = range(1, len(sampled_df) + 1)
+    sampled_dfs.append(sampled_df)
+
+    # Basic filtering per account
+    df = stats_df.query('day0_return > 100')
+
+    # fallback to base if necessary
+    base_strategy_df = get_last_strategy_result(
+        project, rank_table, dt_version, BASE_GROUP_NAME)
+
+    for gh_id in GH_IDS:
+        sub_df = df.query(f'gh_id == "{gh_id}"')
+        if len(sub_df) < SEND_N:
+            LOGGER.warning(
+                "gh_id[{}] rows[{}] not enough for layer2, fallback to base"
+                .format(gh_id, len(sub_df)))
+            sub_df = base_strategy_df.query(f'gh_id == "{gh_id}"').copy()
+            sub_df['score'] = sub_df['sort']  # reuse the base sort position as the sampling weight
+        sampled_df = sub_df.sample(n=SEND_N, weights=sub_df['score'])
+        sampled_df['sort'] = range(1, len(sampled_df) + 1)
+        sampled_dfs.append(sampled_df)
+
+    extend_df = pd.concat(sampled_dfs)
+    extend_df['strategy_key'] = EXPLORE2_GROUP_NAME
+    extend_df['dt_version'] = dt_version
+    result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
+    return result_df
+
+
+def rank_for_base(run_dt, run_hour, project, stats_table, rank_table, stg_key):
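+    """Base (exploitation) strategy: keep the top-SEND_N videos per account by smoothed score."""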
+    stats_df = process_reply_stats(project, stats_table, STATS_PERIOD_DAYS, run_dt)
+
+    # TODO: support to set base manually
+    dt_version = f'{run_dt}{run_hour}'
+
+    # Fetch the current base strategy; the strategy table's dt_version (ctime partition) uses the current time
+    base_strategy_df = get_last_strategy_result(
+        project, rank_table, dt_version, stg_key)
+
+    default_stats_df = stats_df.query('gh_id == "default"')
+
+    # Rank within each account (including default) to decide its base exploitation content.
+    # Join before filtering so the current base strategy always takes part in the ranking.
+    gh_ids_str = ','.join(f'"{x}"' for x in GH_IDS)
+    stats_df = stats_df.query(f'gh_id in ({gh_ids_str})')
+
+    stats_with_strategy_df = stats_df \
+        .merge(
+        base_strategy_df,
+        on=['gh_id', 'video_id'],
+        how='left') \
+        .query('strategy_key.notna() or score > 0.1')
+
+    # Combine default and per-account data
+    grouped_stats_df = pd.concat([default_stats_df, stats_with_strategy_df]).reset_index(drop=True)
+
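+    # Keep the top-n rows per account by score and assign 1-based sort positions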
+    def set_top_n(group, n=2):
+        group_sorted = group.sort_values(by='score', ascending=False)
+        top_n = group_sorted.head(n).copy()
+        top_n['sort'] = range(1, len(top_n) + 1)
+        return top_n
+
+    ranked_df = grouped_stats_df.groupby('gh_id').apply(set_top_n, SEND_N)
+    ranked_df = ranked_df.reset_index(drop=True)
+    # ranked_df['sort'] = grouped_stats_df.groupby('gh_id')['score'].rank(ascending=False)
+    ranked_df['strategy_key'] = stg_key
+    ranked_df['dt_version'] = dt_version
+    ranked_df = ranked_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
+    return ranked_df
+
+
+def check_result_data(df):
+    """Verify that every (gh_id, strategy_key) combination has exactly SEND_N rows."""
+    for gh_id in GH_IDS + ('default',):
+        for key in (EXPLORE1_GROUP_NAME, EXPLORE2_GROUP_NAME, BASE_GROUP_NAME):
+            sub_df = df.query(f'gh_id == "{gh_id}" and strategy_key == "{key}"')
+            if len(sub_df) != SEND_N:
+                raise Exception(f"Result not enough for gh_id[{gh_id}], strategy[{key}]")
+
+
+def rank_for_base_designate(run_dt, run_hour, stg_key):
+    """Return manually designated rankings (hard-coded WeCom strategy data)."""
+    dt_version = f'{run_dt}{run_hour}'
+
+    # Hard-coded (video_id, score, sort) tuples per gh_id
+    gh_id_data = {
+        'SongYi': [
+            (12794884, 0.6, 1),
+            (13788955, 0.5, 2)
+        ],
+        'XinYi': [
+            (20463342, 0.6, 1),
+            (23231548, 0.5, 2)
+        ],
+        '17512006748': [
+            (14054813, 0.6, 1),
+            (20681137, 0.5, 2)
+        ]
+    }
+
+    # Video info for the 'default' account (and any gh_id not listed above)
+    default_data = [
+        (12794884, 0.6, 1),
+        (13788955, 0.5, 2)
+    ]
+
+    # Build one row per designated video, then construct the DataFrame in a single pass
+    rows = []
+    for gh_id in GH_IDS + ('default',):
+        data_to_use = gh_id_data.get(gh_id, default_data)
+        for video_id, score, sort in data_to_use:
+            rows.append({
+                'strategy_key': stg_key,
+                'dt_version': dt_version,
+                'gh_id': gh_id,
+                'sort': sort,
+                'video_id': video_id,
+                'score': score
+            })
+    return pd.DataFrame(rows)
+
+
+def build_and_transfer_data(run_dt, run_hour, project, **kwargs):
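+    """Compute rankings for all strategy groups, attach video metadata, and write the result to ODPS and MySQL."""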
+    dt_version = f'{run_dt}{run_hour}'
+    dry_run = kwargs.get('dry_run', False)
+
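+    # Computed strategies below are temporarily disabled; all three groups currently use manually designated data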
+    # layer1_rank = rank_for_layer1(run_dt, run_hour, ODS_PROJECT, EXPLORE_POOL_TABLE)
+    # layer2_rank = rank_for_layer2(run_dt, run_hour, ODS_PROJECT, GH_REPLY_STATS_TABLE, ODPS_WE_COM_RANK_RESULT_TABLE)
+    # base_rank = rank_for_base(run_dt, run_hour, ODS_PROJECT, GH_REPLY_STATS_TABLE, ODPS_WE_COM_RANK_RESULT_TABLE,BASE_GROUP_NAME)
+    layer1_rank = rank_for_base_designate(run_dt, run_hour, EXPLORE1_GROUP_NAME)
+    layer2_rank = rank_for_base_designate(run_dt, run_hour, EXPLORE2_GROUP_NAME)
+    base_rank = rank_for_base_designate(run_dt, run_hour, BASE_GROUP_NAME)
+
+    final_rank_df = pd.concat([layer1_rank, layer2_rank, base_rank]).reset_index(drop=True)
+    check_result_data(final_rank_df)
+
+    odps_instance = get_odps_instance(project)
+    odps_ranked_df = odps.DataFrame(final_rank_df)
+
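+    # Attach video title and cover URL from the ODPS video table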
+    video_df = get_dataframe_from_odps('videoods', 'wx_video')
+    video_df['cover_url'] = video_df['cover_img_path'] + CDN_IMG_OPERATOR
+    video_df = video_df['id', 'title', 'cover_url']
+    final_df = odps_ranked_df.join(video_df, on=('video_id', 'id'))
+
+    final_df = final_df.to_pandas()
+    final_df = final_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'title', 'cover_url', 'score']]
+
+    # Reverse sending order: map sort k to SEND_N + 1 - k
+    final_df['sort'] = SEND_N + 1 - final_df['sort']
+
+    if dry_run:
+        print(final_df[['strategy_key', 'gh_id', 'sort', 'video_id', 'score', 'title']])
+        return
+
+    # save to ODPS
+    t = odps_instance.get_table(ODPS_WE_COM_RANK_RESULT_TABLE)
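+    # Partition spec looks like 'dt=YYYYMMDD,hour=HH,ctime=YYYYMMDDHH'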
+    part_spec_dict = {'dt': run_dt, 'hour': run_hour, 'ctime': dt_version}
+    part_spec = ','.join(f'{k}={v}' for k, v in part_spec_dict.items())
+    with t.open_writer(partition=part_spec, create_partition=True, overwrite=True) as writer:
+        writer.write(list(final_df.itertuples(index=False)))
+
+    # sync to MySQL
+    data_to_insert = [tuple(row) for row in final_df.itertuples(index=False)]
+    data_columns = list(final_df.columns)
+    mysql = MysqlHelper(CONFIG.MYSQL_CRAWLER_INFO)
+    mysql.batch_insert(RDS_RANK_RESULT_TABLE, data_to_insert, data_columns)
+
+
+def main_loop():
+    argparser = ArgumentParser()
+    argparser.add_argument('-n', '--dry-run', action='store_true')
+    args = argparser.parse_args()
+
+    try:
+        now_date = datetime.today()
+        LOGGER.info(f"开始执行: {datetime.strftime(now_date, '%Y-%m-%d %H:%M')}")
+        now_hour = now_date.strftime("%H")
+
+        last_date = now_date - timedelta(1)
+        last_dt = last_date.strftime("%Y%m%d")
+        # Check whether the current day-level data is ready
+        # The upstream stats table currently updates daily, but its fields are designed to be hour-compatible
+        h_data_count = check_data_partition(ODS_PROJECT, GH_REPLY_STATS_TABLE, last_dt, '00')
+        if h_data_count > 0:
+            LOGGER.info('upstream table row count={}, starting computation'.format(h_data_count))
+            run_dt = now_date.strftime("%Y%m%d")
+            LOGGER.info(f'run_dt: {run_dt}, run_hour: {now_hour}')
+            build_and_transfer_data(run_dt, now_hour, ODS_PROJECT,
+                                    dry_run=args.dry_run)
+            LOGGER.info('data update finished')
+        else:
+            LOGGER.info("上游数据未就绪,等待60s")
+            Timer(60, main_loop).start()
+        return
+    except Exception as e:
+        LOGGER.error(f"数据更新失败, exception: {e}, traceback: {traceback.format_exc()}")
+        if CONFIG.ENV_TEXT == '开发环境':
+            return
+        send_msg_to_feishu(
+            webhook=CONFIG.FEISHU_ROBOT['server_robot'].get('webhook'),
+            key_word=CONFIG.FEISHU_ROBOT['server_robot'].get('key_word'),
+            msg_text=f"rov-offline{CONFIG.ENV_TEXT} - 数据更新失败\n"
+                     f"exception: {e}\n"
+                     f"traceback: {traceback.format_exc()}"
+        )
+
+
+if __name__ == '__main__':
+    LOGGER.info("%s 开始执行" % os.path.basename(__file__))
+    LOGGER.info(f"environment: {CONFIG.ENV_TEXT}")
+    main_loop()