
Filter audit-rejected videos for specified accounts

xueyiming · 1 month ago
commit 81d92d612d
2 files changed, 77 insertions and 12 deletions
  1. alg_growth_common.py (+59, -0)
  2. alg_growth_gh_reply_video_v1.py (+18, -12)

alg_growth_common.py (+59, -0)

@@ -7,9 +7,23 @@
 """
 Common functions for growth jobs
 """
+import pandas as pd
+
+from my_utils import request_post
 
 UNSAFE_VIDEO_IDS = [14403867, 13696461, 13671819, 13587868, 13680796, 14050873,
                     26348326, 28623786]
+AUDIT_APPROVED_GH_IDS = [
+    # Accounts for which audit-rejected videos are filtered out
+    'gh_b63b9dde3f4b', 'gh_330ef0db846d', 'gh_e2318164f869', 'gh_620af8e24fb9',
+    'gh_133c36b99b14', 'gh_5ac72e2b9130', 'gh_ef8ade0fad92', 'gh_1e03b6de22bf',
+    'gh_5538fe297e59', 'gh_8c6fffcbaac1', 'gh_d0e830b7547e', 'gh_fb234f4e32a5',
+    'gh_84c5d01a61e7', 'gh_87c4b8ae885e', 'gh_29d8a63d5e5e', 'gh_b144210318e5',
+    'gh_1f9bf4cfa788', 'gh_4f47d12bbe04', 'gh_8c6af276df98', 'gh_1f16bc6ac60d',
+    'gh_4920bc4c5720', 'gh_5177a8c57917', 'gh_5e3e6cd5e35c', 'gh_d2c72bcc05c9',
+    'gh_5f2400da935c', 'gh_669555ebea28', 'gh_28ce883486c3', 'gh_7057ef30222b',
+    'gh_b0048adc0b46', 'gh_6e61a2d5db85', 'gh_01cd19465b39', 'gh_126c99b39cea',
+    'gh_4a1174e36ceb', 'gh_f81c27eb8c48', 'gh_3170dc15e246', 'gh_1ccfb5620605',
+    'gh_315be76a746d',
+]
 
 def check_unsafe_video(df, force_replace=True):
     unsafe_video_condition = ','.join([str(x) for x in UNSAFE_VIDEO_IDS])
@@ -20,7 +34,52 @@ def check_unsafe_video(df, force_replace=True):
             raise Exception("video unsafe")
         df.loc[unsafe_rows.index, 'video_id'] = 20463342
 
+
 def filter_unsafe_video(df):
     unsafe_video_condition = ','.join([str(x) for x in UNSAFE_VIDEO_IDS])
     df = df.query(f'video_id not in ({unsafe_video_condition})')
     return df
+
+
+def filter_audit_failed_video(df):
+    """Drop rows whose video failed audit, for accounts listed in AUDIT_APPROVED_GH_IDS."""
+    video_id_list = df['video_id'].unique().tolist()
+    # Query the batch video-info API in chunks of 20 IDs
+    chunk_size = 20
+    chunks = [video_id_list[i:i + chunk_size] for i in range(0, len(video_id_list), chunk_size)]
+    video_audit_failure = []
+    for chunk in chunks:
+        json_structure = {
+            "videoIdList": chunk
+        }
+        response = request_post('https://longvideoapi.piaoquantv.com/longvideoapi/openapi/video/batchSelectVideoInfo',
+                                json_structure)
+        # Skip the chunk if the request failed or returned a non-zero business code
+        if response is None:
+            continue
+        if response['code'] != 0:
+            continue
+        # An auditStatus/appAuditStatus of 5 is treated as approved; anything else counts as failed
+        for item in response['data']:
+            if item['auditStatus'] != 5 or item['appAuditStatus'] != 5:
+                video_audit_failure.append(item['id'])
+    # Drop a row only if its video failed audit AND its account is subject to the filter
+    condition = (df['video_id'].isin(video_audit_failure)) & (df['gh_id'].isin(AUDIT_APPROVED_GH_IDS))
+    filtered_df = df[~condition]
+    return filtered_df
+
+
+if __name__ == '__main__':
+    # Smoke test: a comma-separated string of video IDs
+    id_str = '20463342,12794884,13788955,13586800,4780859,33332362,19175397,4555247,14403867,12117356,14050873,14142458,17638023,14945734,13680796,13042177,10587660,14552795,12418493,12700411,13671819,13825547,12166346,13587868,19096953,14095344,13817005,1275943,13437896,12492103'
+
+    # Split the string on commas and convert each element to an integer
+    id_list = [int(vid) for vid in id_str.split(',')]
+
+    # Build a test DataFrame; the filter also inspects gh_id, so attach an
+    # account from AUDIT_APPROVED_GH_IDS to every row
+    df = pd.DataFrame({'video_id': id_list,
+                       'gh_id': ['gh_b63b9dde3f4b'] * len(id_list)})
+    print(df)
+
+    # filter_audit_failed_video returns the already-filtered DataFrame
+    filtered_df = filter_audit_failed_video(df)
+    print(filtered_df)
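Since my_utils is not part of this commit, the snippet below is a minimal sketch of what request_post is assumed to do here: POST a JSON body and return the parsed JSON dict, or None on any failure. The real helper's signature and error handling may differ; the inline comment records the response shape that filter_audit_failed_video relies on.

    import requests

    def request_post(url, json_body, timeout=10):
        # Hypothetical stand-in for my_utils.request_post -- an assumption, not the project's helper.
        try:
            resp = requests.post(url, json=json_body, timeout=timeout)
            resp.raise_for_status()
            # Expected payload, inferred from the caller:
            # {"code": 0, "data": [{"id": ..., "auditStatus": 5, "appAuditStatus": 5}, ...]}
            return resp.json()
        except (requests.RequestException, ValueError):
            return None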

alg_growth_gh_reply_video_v1.py (+18, -12)

@@ -16,7 +16,7 @@ from log import Log
 import os
 from argparse import ArgumentParser
 from constants import AutoReplyAccountType
-from alg_growth_common import check_unsafe_video, filter_unsafe_video
+from alg_growth_common import check_unsafe_video, filter_unsafe_video, filter_audit_failed_video
 
 CONFIG, _ = set_config()
 LOGGER = Log()
@@ -41,6 +41,7 @@ SEND_N = 2
 
 pd.set_option('display.max_rows', None)
 
+
 def get_and_update_gh_ids(run_dt):
     gh = get_odps_df_of_max_partition(ODS_PROJECT, GH_DETAIL, {'dt': run_dt})
     gh = gh.to_pandas()
@@ -73,7 +74,7 @@ def check_data_partition(project, table, data_dt, data_hr=None):
 
 def get_last_strategy_result(project, rank_table, dt_version, key):
     strategy_df = get_odps_df_of_max_partition(
-        project, rank_table, { 'ctime': dt_version }
+        project, rank_table, {'ctime': dt_version}
     ).to_pandas()
     sub_df = strategy_df.query(f'strategy_key == "{key}"')
     sub_df = sub_df[['gh_id', 'video_id', 'strategy_key', 'sort']].drop_duplicates()
@@ -165,6 +166,7 @@ def rank_for_layer1(run_dt, run_hour, project, table, gh_df):
     result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
     return result_df
 
+
 def rank_for_layer2(run_dt, run_hour, rank_table):
     stats_df = process_reply_stats(
         ODS_PROJECT, GH_REPLY_STATS_TABLE, GH_REPLY_STATS_HOUR_TABLE,
@@ -172,7 +174,7 @@ def rank_for_layer2(run_dt, run_hour, rank_table):
 
     # Ensure reruns produce consistent results
     dt_version = f'{run_dt}{run_hour}'
-    np.random.seed(int(dt_version)+1)
+    np.random.seed(int(dt_version) + 1)
     # TODO: compute correlation between accounts
     ## Pair up accounts and take the intersection of videos that have RoVn values;
     ## the (smoothed) RoVn values within a single account form a vector.
     ## Compute the correlation coefficient or cosine similarity between the vectors.
@@ -188,10 +190,12 @@ def rank_for_layer2(run_dt, run_hour, rank_table):
 
     # Basic filtering on accounts
     df = stats_df.query('day0_return > 100')
-
+    # Filter out audit-rejected videos for the target accounts
+    df = filter_audit_failed_video(df)
     # fallback to base if necessary
     base_strategy_df = get_last_strategy_result(
         ODS_PROJECT, rank_table, dt_version, BASE_GROUP_NAME)
+    base_strategy_df = filter_audit_failed_video(base_strategy_df)
     for gh_id in GH_IDS:
         if gh_id == 'default':
             continue
@@ -212,12 +216,13 @@ def rank_for_layer2(run_dt, run_hour, rank_table):
     result_df = extend_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
     return result_df
 
+
 def rank_for_base(run_dt, run_hour, rank_table):
     stats_df = process_reply_stats(
         ODS_PROJECT, GH_REPLY_STATS_TABLE, GH_REPLY_STATS_HOUR_TABLE,
         STATS_PERIOD_DAYS, run_dt, run_hour)
 
-    #TODO: support to set base manually
+    # TODO: support setting the base manually
     dt_version = f'{run_dt}{run_hour}'
 
     # Fetch the current base info; the strategy table dt_version (ctime partition) uses the current time
@@ -234,22 +239,23 @@ def rank_for_base(run_dt, run_hour, rank_table):
 
     stats_with_strategy_df = stats_df \
         .merge(
-            base_strategy_df,
-            on=['gh_id', 'video_id'],
-            how='left') \
+        base_strategy_df,
+        on=['gh_id', 'video_id'],
+        how='left') \
         .query('strategy_key.notna() or score > 0.1')
 
     # Merge the default and per-account data
     grouped_stats_df = pd.concat([default_stats_df, stats_with_strategy_df]).reset_index()
-
+    grouped_stats_df = filter_audit_failed_video(grouped_stats_df)
+
     def set_top_n(group, n=2):
         group_sorted = group.sort_values(by='score', ascending=False)
         top_n = group_sorted.head(n)
-        top_n['sort'] = range(1, n + 1)
+        # Use len(top_n): a group may contain fewer than n rows
+        top_n['sort'] = range(1, len(top_n) + 1)
         return top_n
+
     ranked_df = grouped_stats_df.groupby('gh_id').apply(set_top_n, SEND_N)
     ranked_df = ranked_df.reset_index(drop=True)
-    #ranked_df['sort'] = grouped_stats_df.groupby('gh_id')['score'].rank(ascending=False)
+    # ranked_df['sort'] = grouped_stats_df.groupby('gh_id')['score'].rank(ascending=False)
     ranked_df['strategy_key'] = BASE_GROUP_NAME
     ranked_df['dt_version'] = dt_version
     ranked_df = ranked_df[['strategy_key', 'dt_version', 'gh_id', 'sort', 'video_id', 'score']]
@@ -273,7 +279,7 @@ def build_and_transfer_data(run_dt, run_hour, project, **kwargs):
     gh_df = get_and_update_gh_ids(next_dt)
 
     layer1_rank = rank_for_layer1(run_dt, run_hour, ODS_PROJECT, EXPLORE_POOL_TABLE, gh_df)
-    layer2_rank = rank_for_layer2( run_dt, run_hour, ODPS_RANK_RESULT_TABLE)
+    layer2_rank = rank_for_layer2(run_dt, run_hour, ODPS_RANK_RESULT_TABLE)
     base_rank = rank_for_base(run_dt, run_hour, ODPS_RANK_RESULT_TABLE)
     final_rank_df = pd.concat([layer1_rank, layer2_rank, base_rank]).reset_index(drop=True)
     check_result_data(final_rank_df)
@@ -314,7 +320,7 @@ def build_and_transfer_data(run_dt, run_hour, project, **kwargs):
 def main_loop():
     argparser = ArgumentParser()
     argparser.add_argument('-n', '--dry-run', action='store_true')
-    argparser.add_argument('--run-at',help='assume to run at date and hour, yyyyMMddHH')
+    argparser.add_argument('--run-at', help='run as if at this date and hour, format yyyyMMddHH')
     args = argparser.parse_args()
 
     run_date = datetime.today()
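Given the arguments defined above, a typical invocation looks like the following (a usage sketch; the working directory and scheduler wiring are assumptions):

    # dry-run the job as if it were executing at 2024-06-01 12:00 (yyyyMMddHH)
    python alg_growth_gh_reply_video_v1.py --dry-run --run-at 2024060112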