vov_h0_train.py

import pickle

import numpy as np
import pandas as pd
from scipy.optimize import minimize
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

# 1. Load the data
def load_data(file_path):
    # '\N' is the NULL marker used in MySQL-style CSV exports
    df = pd.read_csv(file_path, na_values='\\N')
    return df

# 2. Preprocess the data
def preprocess_data(df, features, target, exposure_col, top_k):
    # Sort by exposure and keep the top-k rows
    df_sorted = df.sort_values(by=exposure_col, ascending=False)
    df_topk = df_sorted.head(top_k)
    X = df_topk[features]
    y = df_topk[target]
    # The exposure threshold implied by the top-k cut
    exposure_threshold = df_topk[exposure_col].min()
    return X, y, exposure_threshold, df_topk

# 3. Compute feature-target correlations
def calculate_correlations(df, features, target):
    correlations = {}
    for feature in features:
        # Drop rows where either the target or the feature is missing
        valid_data = df[[target, feature]].dropna()
        if len(valid_data) == 0:
            # No valid data: fall back to a correlation of 0
            correlations[feature] = 0
        else:
            corr = valid_data[target].corr(valid_data[feature])
            correlations[feature] = corr if not np.isnan(corr) else 0
    # Convert to a Series sorted by absolute correlation, descending
    corr_series = pd.Series(correlations).abs().sort_values(ascending=False)
    return corr_series

# 4. Dynamically weighted sum over the non-missing features of one row
def dynamic_weighted_sum(features, weights):
    valid_features = ~np.isnan(features)
    if np.sum(valid_features) == 0:
        return np.nan
    # Renormalize the weights of the available features so they sum to 1
    normalized_weights = weights[valid_features] / np.sum(weights[valid_features])
    return np.sum(features[valid_features] * normalized_weights)
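
# A quick worked example of the renormalization above (illustrative values,
# not taken from the training data): with weights [1, 2, 3] and a feature row
# [0.2, nan, 0.4], the weights of the valid features, [1, 3], renormalize to
# [0.25, 0.75], so the prediction is 0.2 * 0.25 + 0.4 * 0.75 ≈ 0.35:
#
#   dynamic_weighted_sum(np.array([0.2, np.nan, 0.4]), np.array([1.0, 2.0, 3.0]))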
# 5. Loss function: MSE over the rows where both values are defined
def mse_loss(y_true, y_pred):
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    valid = ~np.isnan(y_true) & ~np.isnan(y_pred)
    return np.mean((y_true[valid] - y_pred[valid]) ** 2)

# 6. Objective function for the optimizer
def objective(weights, X, y_true):
    y_pred = np.array([dynamic_weighted_sum(x, weights) for x in X.values])
    return mse_loss(y_true, y_pred)

# 7. Search for the best weights
def find_best_weights(X, y, initial_weights):
    # Nelder-Mead is derivative-free, which suits the non-smooth
    # renormalization inside dynamic_weighted_sum
    result = minimize(objective, initial_weights, args=(X, y), method='Nelder-Mead')
    return result.x

# 8. Evaluate the model
def evaluate_model(X, y, weights):
    y_pred = np.array([dynamic_weighted_sum(x, weights) for x in X.values])
    y = np.asarray(y)
    valid = ~np.isnan(y) & ~np.isnan(y_pred)
    r2 = r2_score(y[valid], y_pred[valid])
    mse = mse_loss(y, y_pred)
    return r2, mse

# 9. Save the model
def save_model(weights, features, exposure_threshold, top_k, file_path):
    model = {
        'weights': weights,
        'features': features,
        'exposure_threshold': exposure_threshold,
        'top_k': top_k,
    }
    with open(file_path, 'wb') as f:
        pickle.dump(model, f)

# 10. Load the model
def load_model(file_path):
    with open(file_path, 'rb') as f:
        model = pickle.load(f)
    return model['weights'], model['features'], model['exposure_threshold'], model['top_k']
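
# A minimal inference sketch (not part of the original training flow): apply a
# saved model to a new DataFrame holding the same feature columns. The name
# predict_vov is illustrative; rows whose exposure fell below the saved
# exposure_threshold lie outside the regime the weights were fit on.
def predict_vov(model_path, df_new):
    weights, features, exposure_threshold, _top_k = load_model(model_path)
    w = np.asarray(weights)  # saved as a pd.Series indexed in feature order
    return np.array([dynamic_weighted_sum(x, w) for x in df_new[features].values])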
# 11. Main
def main():
    # Load the data
    df = load_data('train_20240921.csv')

    # Features, target variable, and exposure column
    features = ['h1_ago_vov', 'h2_ago_vov', 'h3_ago_vov', 'h24_ago_vov',
                'h48_ago_vov', 'd1_ago_vov', 'd2_ago_vov']
    target = 'cur_hour_vov'
    exposure_col = 'h1_ago_view'  # make sure this column exists in your data
    top_k = 1000  # number of top-exposure rows to use

    # Preprocess the data
    X, y, exposure_threshold, df_topk = preprocess_data(df, features, target, exposure_col, top_k)

    # Split into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Compute correlations on the top-k data
    correlations = calculate_correlations(df_topk, features, target)
    print("Feature correlations:")
    print(correlations)

    # Use the absolute correlations as initial weights
    initial_weights = correlations[features].values

    # Search for the best weights
    best_weights = find_best_weights(X_train, y_train, initial_weights)

    # Evaluate the model
    r2_train, mse_train = evaluate_model(X_train, y_train, best_weights)
    r2_test, mse_test = evaluate_model(X_test, y_test, best_weights)
    print(f"\nTrain R² Score: {r2_train:.4f}, MSE: {mse_train:.4f}")
    print(f"Test R² Score: {r2_test:.4f}, MSE: {mse_test:.4f}")

    # Report feature importance (the optimized weights)
    print("\nFeature importance:")
    for feature, weight in zip(features, best_weights):
        print(f"{feature}: {weight:.4f}")

    # Save the model
    model_path = f'top{top_k}_linear_weighted_model.pkl'
    save_model(pd.Series(best_weights, index=features), features, exposure_threshold, top_k, model_path)

    # Round-trip check: load the model back
    loaded_weights, loaded_features, loaded_threshold, loaded_top_k = load_model(model_path)
    print("\nLoaded model weights:")
    for feature, weight in loaded_weights.items():
        print(f"{feature}: {weight:.4f}")
    print(f"Exposure threshold: {loaded_threshold}")
    print(f"TopK: {loaded_top_k}")

if __name__ == "__main__":
    main()
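
# Usage note (paths as written in this script): running
#   python vov_h0_train.py
# expects train_20240921.csv in the working directory and, with top_k = 1000,
# writes the fitted model to top1000_linear_weighted_model.pkl.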