main_spider.py 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220
"""
Train a dedicated model for spider (crawler) traffic data.
"""
import json
import os
import sys

sys.path.append(os.getcwd())

import lightgbm as lgb
import numpy as np
import optuna
import pandas as pd
from bayes_opt import BayesianOptimization
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
  19. class LightGBM(object):
  20. """
  21. LightGBM model for classification
  22. """
  23. def __init__(self, flag, dt):
  24. self.label_encoder = LabelEncoder()
  25. self.my_c = [
  26. "channel",
  27. "out_user_id",
  28. "mode",
  29. "out_play_cnt",
  30. "out_like_cnt",
  31. "out_share_cnt",
  32. "lop",
  33. "duration",
  34. "tag1",
  35. "tag2",
  36. "tag3"
  37. ]
  38. self.str_columns = ["channel", "mode", "out_user_id", "tag1", "tag2", "tag3"]
  39. self.float_columns = [
  40. "out_play_cnt",
  41. "out_like_cnt",
  42. "out_share_cnt",
  43. "lop",
  44. "duration"
  45. ]
  46. self.split_c = 0.7
  47. self.yc = 0.8
  48. self.model = "models/lightgbm_0401_spider.bin"
  49. self.flag = flag
  50. self.dt = dt
  51. def read_data(self):
  52. """
  53. Read data from local
  54. :return:
  55. """
  56. path = "data/train_data/spider_data_240401.json"
  57. df = pd.read_json(path)
  58. df = df.dropna(subset=['label'])
  59. labels = df['label']
  60. features = df.drop("label", axis=1)
  61. for key in self.float_columns:
  62. features[key] = pd.to_numeric(features[key], errors="coerce")
  63. for key in self.str_columns:
  64. features[key] = self.label_encoder.fit_transform(features[key])
  65. return features, labels
  66. def bays(self):
  67. # 创建LightGBM数据集,注意不要在这里指定categorical_feature,因为我们使用的是玩具数据集
  68. x, y = self.read_data()
  69. train_size = int(len(x) * 0.9)
  70. X_train, X_test = x[:train_size], x[train_size:]
  71. Y_train, Y_test = y[:train_size], y[train_size:]
  72. train_data = lgb.Dataset(X_train, label=Y_train)
  73. def lgbm_eval(num_leaves, learning_rate, feature_fraction, bagging_fraction, bagging_freq, min_child_samples):
  74. params = {
  75. 'objective': 'binary',
  76. 'metric': 'auc',
  77. 'verbose': -1,
  78. 'num_leaves': int(num_leaves),
  79. 'learning_rate': learning_rate,
  80. 'feature_fraction': feature_fraction,
  81. 'bagging_fraction': bagging_fraction,
  82. 'bagging_freq': int(bagging_freq),
  83. 'min_child_samples': int(min_child_samples),
  84. }
  85. cv_result = lgb.cv(params, train_data, nfold=5, seed=42, stratified=True, metrics=['auc'],
  86. early_stopping_rounds=10)
  87. return max(cv_result['auc-mean'])
  88. param_bounds = {
  89. 'num_leaves': (20, 40),
  90. 'learning_rate': (1e-4, 1e-2),
  91. 'feature_fraction': (0.5, 0.8),
  92. 'bagging_fraction': (0.5, 0.8),
  93. 'bagging_freq': (1, 10),
  94. 'min_child_samples': (20, 100),
  95. }
  96. optimizer = BayesianOptimization(f=lgbm_eval, pbounds=param_bounds, random_state=42)
  97. optimizer.maximize(init_points=5, n_iter=25)
  98. print("Best Parameters:", optimizer.max['params'])
  99. def train_model(self):
  100. """
  101. Load dataset
  102. :return:
  103. """
  104. x, y = self.read_data()
  105. train_size = int(len(x) * self.split_c)
  106. X_train, X_test = x[:train_size], x[train_size:]
  107. Y_train, Y_test = y[:train_size], y[train_size:]
  108. train_data = lgb.Dataset(
  109. X_train,
  110. label=Y_train,
  111. categorical_feature=["channel", "mode", "out_user_id", "tag1", "tag2", "tag3"],
  112. )
  113. test_data = lgb.Dataset(X_test, label=Y_test, reference=train_data)
  114. params = {
  115. 'num_leaves': 31,
  116. 'learning_rate': 0.00020616904432655601,
  117. 'feature_fraction': 0.6508847259863764,
  118. 'bagging_fraction': 0.7536774652478249,
  119. 'bagging_freq': 6,
  120. 'min_child_samples': 99,
  121. 'num_threads': 16
  122. }
  123. # 训练模型
  124. num_round = 100
  125. print("开始训练......")
  126. bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
  127. bst.save_model(self.model)
  128. print("模型训练完成✅")
  129. def evaluate_model(self):
  130. """
  131. 评估模型性能
  132. :return:
  133. """
  134. fw = open("summary_tag_03{}_spider.txt".format(self.dt), "a+", encoding="utf-8")
  135. # 测试数据
  136. with open("data/produce_data/x_data_total_return_predict_{}_spider.json".format(self.dt)) as f1:
  137. x_list = json.loads(f1.read())
  138. # 测试 label
  139. with open("data/produce_data/y_data_total_return_predict_{}_spider.json".format(self.dt)) as f2:
  140. Y_test = json.loads(f2.read())
  141. Y_test = [0 if i <= 19 else 1 for i in Y_test]
  142. X_test = pd.DataFrame(x_list, columns=self.my_c)
  143. for key in self.str_columns:
  144. X_test[key] = self.label_encoder.fit_transform(X_test[key])
  145. for key in self.float_columns:
  146. X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
  147. bst = lgb.Booster(model_file=self.model)
  148. y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
  149. temp = sorted(list(y_pred))
  150. yuzhi = temp[int(len(temp) * 0.7) - 1]
  151. y_pred_binary = [0 if i <= yuzhi else 1 for i in list(y_pred)]
  152. # 转换为二进制输出
  153. score_list = []
  154. for index, item in enumerate(list(y_pred)):
  155. real_label = Y_test[index]
  156. score = item
  157. prid_label = y_pred_binary[index]
  158. print(real_label, "\t", prid_label, "\t", score)
  159. fw.write("{}\t{}\t{}\n".format(real_label, prid_label, score))
  160. score_list.append(score)
  161. print("预测样本总量: {}".format(len(score_list)))
  162. data_series = pd.Series(score_list)
  163. print("统计 score 信息")
  164. print(data_series.describe())
  165. # 评估模型
  166. accuracy = accuracy_score(Y_test, y_pred_binary)
  167. print(f"Accuracy: {accuracy}")
  168. fw.close()
  169. def feature_importance(self):
  170. """
  171. Get the importance of each feature
  172. :return:
  173. """
  174. lgb_model = lgb.Booster(model_file=self.model)
  175. importance = lgb_model.feature_importance(importance_type='split')
  176. feature_name = lgb_model.feature_name()
  177. feature_importance = sorted(zip(feature_name, importance), key=lambda x: x[1], reverse=True)
  178. # 打印特征重要性
  179. for name, imp in feature_importance:
  180. print(name, imp)
  181. if __name__ == "__main__":
  182. # i = int(input("输入 1 训练, 输入 2 预测:\n"))
  183. # if i == 1:
  184. # f = "train"
  185. # dt = "whole"
  186. # L = LightGBM(flag=f, dt=dt)
  187. # L.train_model()
  188. # elif i == 2:
  189. # f = "predict"
  190. # dt = int(input("输入日期, 16-21:\n"))
  191. # L = LightGBM(flag=f, dt=dt)
  192. # L.evaluate_model()
  193. # L.feature_importance()
  194. L = LightGBM("train", "whole")
  195. L.bays()
  196. # study = optuna.create_study(direction='maximize')
  197. # study.optimize(L.bays_params, n_trials=100)
  198. # print('Number of finished trials:', len(study.trials))
  199. # print('Best trial:', study.best_trial.params)