Add Python analysis scripts
Fix the failure exit status code issue

zhangbo 5 months ago
parent
commit
caa8fe657d

+ 2 - 1
write_redis/alg_recsys_feature_08_vidh24predv2_redis.py

@@ -189,9 +189,10 @@ if __name__ == '__main__':
         exit_code = process.exitcode
         if exit_code != 0:
             print(f"子进程以状态码 {exit_code} 退出,执行失败。")
-            sys.exit(exit_code)  # return the child process's status code to the parent
+            os._exit(exit_code)  # return the child process's status code to the parent
         else:
             print("子进程正常结束。")
+            os._exit(0)
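
Context for the switch (an inference; the commit message only says the failure status code was not coming through): sys.exit raises SystemExit, which surrounding try/except blocks or cleanup handlers can swallow, so the shell may still observe status 0; os._exit terminates the interpreter immediately with the given code, skipping atexit hooks and exception handling, so the caller reliably sees the child's failure. A minimal sketch of the pattern the hunk relies on, with a hypothetical worker function that is not part of this repo:

import os
import sys
from multiprocessing import Process

def worker():
    # Hypothetical child job; a non-zero SystemExit code marks failure.
    sys.exit(3)

if __name__ == '__main__':
    p = Process(target=worker)
    p.start()
    p.join()
    exit_code = p.exitcode
    if exit_code != 0:
        print(f"child exited with status {exit_code}, run failed")
        # os._exit skips SystemExit handling and cleanup, so the shell or
        # scheduler reliably observes the child's non-zero status.
        os._exit(exit_code)
    else:
        print("child finished normally")
        os._exit(0)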
 
 
 

+ 3 - 0
write_redis/alg_recsys_feature_08_vidh24predv2_redis_task.sh

@@ -1,6 +1,9 @@
 source /etc/profile
 echo $ROV_OFFLINE_ENV
 
+SHELL=/bin/bash
+PATH=/usr/local/bin:/usr/bin:/bin
+
 log_dir="my_logs_08"
 if [ ! -d ${log_dir} ]; then
     mkdir ${log_dir}

+ 100 - 0
write_redis/分析-模拟在线打分的AUC.py

@@ -0,0 +1,100 @@
+import pandas as pd
+import numpy as np
+import xgboost as xgb
+from tqdm import tqdm
+import sys
+from scipy.stats import pearsonr
+from itertools import combinations
+from bisect import bisect_left, bisect_right, insort
+
+
+
+def apply_title(row):
+    try:
+        return row.replace("\n", "")
+    except Exception as e:
+        print(str(e))
+        return row
+def func_make_data(file_path: str):
+    df_list = [pd.read_csv(file) for file in file_path.split(",")]
+    df = pd.concat(df_list, ignore_index=True)
+    print(df.columns.tolist())
+    # df["title"] = df["title"].apply(apply_title)
+    for col in [
+        '日期', '小时', 'rank',
+        '曝光量', '分享次数', '多层回流人数', 'return_rate', 'return_cnt',
+        'str', 'rosn', 'rovn', 'vovh24',
+        'score_552', 'score_562', 'score_567',
+         'fmrov', 'hasreturnrovscore', 'vov_score_562', 'vov_score_567'
+    ]:
+        df[col] = pd.to_numeric(df[col], errors='coerce')
+    for col in ['实验组']:
+        if col not in df.columns:
+            df[col] = "无"
+        else:
+            df[col] = df[col].astype(str)
+    df["score_552_offline"] = df["fmrov"] * (1 + df["hasreturnrovscore"])
+    df["score_562_offline"] = df["fmrov"] * (1 + df["hasreturnrovscore"]) * (1 + 1 * df["vov_score_562"])
+    df["score_567_offline"] = df["fmrov"] * (1 + df["hasreturnrovscore"]) + 0.05 * df["vov_score_567"]
+    df.fillna(0, inplace=True)
+    df = df[df["实验组"] != "未知"].reset_index(drop=True)
+    return df
+
+# Pearson correlation coefficient for each hour group (defined here but not called below)
+def calculate_correlation(group, col_a, col_b):
+    a = group[col_a]
+    b = group[col_b]
+    return pearsonr(a, b)[0]  # keep the correlation coefficient, drop the p-value
+
+def calculate_auc_v2(group, col_a, col_b):
+    sorted_group = group.sort_values(by=col_a).reset_index(drop=True)
+    success_count = 0
+    b_list = []  # keep a sorted list of the col_b values seen so far
+    for index, row in sorted_group.iterrows():
+        current_a = row[col_a]
+        current_b = row[col_b]
+        # binary-search where current_b falls among the col_b values seen so far
+        pos_left = bisect_left(b_list, current_b)
+        pos_right = bisect_right(b_list, current_b)
+        pos = pos_left + (pos_right - pos_left)/2
+        # earlier rows have col_a <= current_a, so each smaller col_b already seen is a concordant pair
+        success_count += pos  # number of smaller col_b values, with ties on col_b counted as 0.5
+        # insert the current col_b value, keeping b_list sorted
+        insort(b_list, current_b)
+    # normalise the concordant-pair count by the total number of pairs
+    total_combinations = len(group) * (len(group) - 1) / 2
+    success_probability = success_count / total_combinations if total_combinations > 0 else 0
+    return success_probability
+
+def func(df, rank_limit, col_a, col_b):
+    c = df[df["rank"] <= rank_limit].groupby(['小时', '实验组']).apply(calculate_auc_v2, col_a=col_a, col_b=col_b).reset_index()
+    c.columns = ['小时', '实验组', col_a + "-" + col_b]
+    print("完成:{}和{}的计算。".format(col_a, col_b))
+    return c
+
+try:
+    date_train = sys.argv[1]
+except Exception as e:
+    date_train = "~/Downloads/20241109_top1000(1).csv"
+df = func_make_data(date_train)
+for rank_limit in [100, 500, 1000]:
+    print("date_train:rank_limit:{}-{}".format(date_train, rank_limit))
+    df_01 = func(df, rank_limit, "vovh24", "score_552")
+    df_02 = func(df, rank_limit, "vovh24", "score_562")
+    df_03 = func(df, rank_limit, "vovh24", "score_567")
+    df_04 = func(df, rank_limit, "rovn", "score_552")
+    df_05 = func(df, rank_limit, "rovn", "score_562")
+    df_06 = func(df, rank_limit, "rovn", "score_567")
+    df_07 = func(df, rank_limit, "vovh24", "score_552_offline")
+    df_08 = func(df, rank_limit, "vovh24", "score_562_offline")
+    df_09 = func(df, rank_limit, "vovh24", "score_567_offline")
+    df_10 = func(df, rank_limit, "rovn", "score_552_offline")
+    df_11 = func(df, rank_limit, "rovn", "score_562_offline")
+    df_12 = func(df, rank_limit, "rovn", "score_567_offline")
+    df_list = [df_01, df_02, df_03, df_04, df_05, df_06, df_07, df_08, df_09, df_10, df_11, df_12]
+    df_merged = pd.concat(df_list, axis=1)
+    df_select = df_merged.iloc[:, [0] + [3*i+2 for i in range(len(df_list))]]
+    df_select.to_csv("产品4_20241109_top1000-相关性-top{}.csv".format(rank_limit), index=False)
+
+
+
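
calculate_auc_v2 (also duplicated in the second analysis script below) is an O(n log n) pairwise-concordance metric in the spirit of AUC: rows are sorted by col_a, and for each row the function counts how many previously seen col_b values are smaller (ties count 0.5), then divides by the n*(n-1)/2 possible pairs. A quick sanity check is a brute-force count over itertools.combinations, which the script imports but never uses. The sketch below assumes calculate_auc_v2 from the script above is available in the same session; the toy frame is made up, and with no ties in col_a the two computations agree exactly:

import pandas as pd
from itertools import combinations

def brute_force_auc(group, col_a, col_b):
    # For every pair, take the row with the smaller col_a and check whether its
    # col_b is also smaller; ties on col_b count as half a concordant pair.
    a = group[col_a].to_numpy()
    b = group[col_b].to_numpy()
    pairs = list(combinations(range(len(group)), 2))
    if not pairs:
        return 0
    concordant = 0.0
    for i, j in pairs:
        lo, hi = (i, j) if a[i] <= a[j] else (j, i)
        if b[lo] < b[hi]:
            concordant += 1
        elif b[lo] == b[hi]:
            concordant += 0.5
    return concordant / len(pairs)

toy = pd.DataFrame({"vovh24": [0.1, 0.4, 0.2, 0.9], "score_552": [0.3, 0.5, 0.2, 0.8]})
print(calculate_auc_v2(toy, "vovh24", "score_552"))  # 0.8333...
print(brute_force_auc(toy, "vovh24", "score_552"))   # same value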

+ 92 - 0
write_redis/分析-自身在线打分的AUC.py

@@ -0,0 +1,92 @@
+import pandas as pd
+import numpy as np
+import xgboost as xgb
+from tqdm import tqdm
+import sys
+from scipy.stats import pearsonr
+from itertools import combinations
+from bisect import bisect_left, bisect_right, insort
+
+
+
+def apply_title(row):
+    try:
+        return row.replace("\n", "")
+    except Exception as e:
+        print(str(e))
+        return row
+def func_make_data(file_path: str):
+    df_list = [pd.read_csv(file) for file in file_path.split(",")]
+    df = pd.concat(df_list, ignore_index=True)
+    print(df.columns.tolist())
+    # df["title"] = df["title"].apply(apply_title)
+    for col in [
+        '日期', '小时', 'rank',
+        '曝光量', '分享次数', '多层回流人数', 'return_rate', 'return_cnt',
+        'str', 'rosn', 'rovn', 'vovh24',
+        'score', 'fmrov', 'hasreturnrovscore', 'alpha_vov', 'vovscore'
+    ]:
+        df[col] = pd.to_numeric(df[col], errors='coerce')
+    for col in ['实验组']:
+        if col not in df.columns:
+            df[col] = "无"
+        else:
+            df[col] = df[col].astype(str)
+    df["p_rov"] = df["fmrov"] * (1 + df["hasreturnrovscore"])
+    df.fillna(0, inplace=True)
+    df = df[df["实验组"] != "未知"].reset_index(drop=True)
+    return df
+
+# Pearson correlation coefficient for each hour group (defined here but not called below)
+def calculate_correlation(group, col_a, col_b):
+    a = group[col_a]
+    b = group[col_b]
+    return pearsonr(a, b)[0]  # keep the correlation coefficient, drop the p-value
+
+def calculate_auc_v2(group, col_a, col_b):
+    sorted_group = group.sort_values(by=col_a).reset_index(drop=True)
+    success_count = 0
+    b_list = []  # keep a sorted list of the col_b values seen so far
+    for index, row in sorted_group.iterrows():
+        current_a = row[col_a]
+        current_b = row[col_b]
+        # binary-search where current_b falls among the col_b values seen so far
+        pos_left = bisect_left(b_list, current_b)
+        pos_right = bisect_right(b_list, current_b)
+        pos = pos_left + (pos_right - pos_left)/2
+        # earlier rows have col_a <= current_a, so each smaller col_b already seen is a concordant pair
+        success_count += pos  # number of smaller col_b values, with ties on col_b counted as 0.5
+        # insert the current col_b value, keeping b_list sorted
+        insort(b_list, current_b)
+    # normalise the concordant-pair count by the total number of pairs
+    total_combinations = len(group) * (len(group) - 1) / 2
+    success_probability = success_count / total_combinations if total_combinations > 0 else 0
+    return success_probability
+
+def func(df, rank_limit, col_a, col_b):
+    c = df[df["rank"] <= rank_limit].groupby(['小时', '实验组']).apply(calculate_auc_v2, col_a=col_a, col_b=col_b).reset_index()
+    c.columns = ['小时', '实验组', col_a + "-" + col_b]
+    print("完成:{}和{}的计算。".format(col_a, col_b))
+    return c
+
+try:
+    date_train = sys.argv[1]
+except Exception as e:
+    date_train = "~/Downloads/20241105.csv"
+df = func_make_data(date_train)
+for rank_limit in [100, 500, 1000]:
+    print("date_train:rank_limit:{}-{}".format(date_train, rank_limit))
+    df_01 = func(df, rank_limit, "vovh24", "score")
+    df_02 = func(df, rank_limit, "rovn", "score")
+    df_list = []
+    for df_tmp in [df_01, df_02]:
+        for experiment in ["552", "562", "567"]:
+            df_list.append(
+                df_tmp[df_tmp["实验组"] == experiment].reset_index(drop=True)
+            )
+    df_merged = pd.concat(df_list, axis=1)
+    df_select = df_merged.iloc[:, [0] + [3*i+2 for i in range(len(df_list))]]
+    df_select.to_csv("20241105-相关性-top{}.csv".format(rank_limit), index=False)
+
+
+
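
Both analysis scripts take a comma-separated list of CSV paths as sys.argv[1], fall back to a hard-coded local file when no argument is given, and write one wide CSV per rank cutoff. The wide table relies on every per-comparison frame sharing the layout ['小时', '实验组', metric] with the same row order: after pd.concat(axis=1) the metric of the i-th frame sits at column offset 3*i + 2, and the iloc keeps the first 小时 column plus each metric column. A toy illustration of that selection, with made-up values:

import pandas as pd

f1 = pd.DataFrame({"小时": [0, 1], "实验组": ["552", "552"], "vovh24-score": [0.61, 0.58]})
f2 = pd.DataFrame({"小时": [0, 1], "实验组": ["552", "552"], "rovn-score": [0.55, 0.57]})
df_list = [f1, f2]
merged = pd.concat(df_list, axis=1)  # six columns: 小时 and 实验组 repeated per frame
wide = merged.iloc[:, [0] + [3 * i + 2 for i in range(len(df_list))]]
print(wide)  # 小时 plus one AUC-style column per comparison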