main.py 8.0 KB

import os
import sys
import json

sys.path.append(os.getcwd())

import numpy as np
import pandas as pd
import lightgbm as lgb
import optuna
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score

class LightGBM(object):
    """
    LightGBM model for classification.
    """

    def __init__(self):
        self.label_encoder = LabelEncoder()
        self.my_c = [
            "uid",
            "type",
            "channel",
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_user_id",
            "mode",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
            "tag1",
            "tag2",
            "tag3",
        ]
        self.str_columns = ["uid", "type", "channel", "mode", "out_user_id", "tag1", "tag2", "tag3"]
        self.float_columns = [
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
        ]
        self.split_c = 0.99
        self.yc = 0.8
        self.model = "lightgbm_tag_train_04.bin"
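        # Reading of the settings above (inferred from how they are used below,
        # not documented in the original): split_c is the train/test split
        # ratio, yc is the quantile at which the raw target is binarized into
        # 0/1 labels, and model is the path where the trained booster is saved
        # and later reloaded.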

    def bays_params(self, trial):
        """
        Optuna objective for Bayesian hyperparameter search.
        :return: validation accuracy for this trial
        """
        # Define the search space.
        param = {
            "objective": "binary",
            "metric": "binary_logloss",
            "verbosity": -1,
            "boosting_type": "gbdt",
            "num_leaves": trial.suggest_int("num_leaves", 20, 40),
            # suggest_float replaces the deprecated suggest_loguniform/suggest_uniform.
            "learning_rate": trial.suggest_float("learning_rate", 1e-8, 1.0, log=True),
            "feature_fraction": trial.suggest_float("feature_fraction", 0.4, 1.0),
            "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 1.0),
            "bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
            "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
            "num_threads": 16,  # number of threads
        }
        X_train, X_test = self.generate_x_data()
        Y_train, Y_test = self.generate_y_data()
        train_data = lgb.Dataset(
            X_train,
            label=Y_train,
            categorical_feature=self.str_columns,
        )
        test_data = lgb.Dataset(X_test, label=Y_test, reference=train_data)
        gbm = lgb.train(param, train_data, num_boost_round=100, valid_sets=[test_data])
        preds = gbm.predict(X_test)
        pred_labels = np.rint(preds)  # round predicted probabilities to 0/1 labels
        accuracy = accuracy_score(Y_test, pred_labels)
        return accuracy
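    # Note: this objective returns accuracy, so the (commented-out) study in
    # __main__ is created with direction='maximize'.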

    def generate_x_data(self):
        """
        Load and encode the feature matrix.
        :return: X_train, X_test DataFrames
        """
        with open("produce_data/x_data_total_return_train.json") as f1:
            x_list = json.loads(f1.read())
        index_t = int(len(x_list) * self.split_c)
        X_train = pd.DataFrame(x_list[:index_t], columns=self.my_c)
        for key in self.str_columns:
            X_train[key] = self.label_encoder.fit_transform(X_train[key])
        for key in self.float_columns:
            X_train[key] = pd.to_numeric(X_train[key], errors="coerce")
        X_test = pd.DataFrame(x_list[index_t:], columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        return X_train, X_test
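    # Caveat: label_encoder is refit per column and again on the test split, so
    # train and test rows can receive different integer codes for the same
    # category. A per-column encoder fit on the training data only (then applied
    # with transform) would keep the encodings consistent.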

    def generate_y_data(self):
        """
        Load the raw target and binarize it into labels.
        :return: y_train, y_test arrays
        """
        with open("produce_data/y_data_total_return_train.json") as f2:
            y_list = json.loads(f2.read())
        index_t = int(len(y_list) * self.split_c)
        # Use the self.yc quantile of the target as the positive-class threshold.
        temp = sorted(y_list)
        yuzhi = temp[int(len(temp) * self.yc) - 1]
        print("Threshold: {}".format(yuzhi))
        y__list = [0 if i <= yuzhi else 1 for i in y_list]
        y_train = np.array(y__list[:index_t])
        y_test = np.array(y__list[index_t:])
        return y_train, y_test
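    # With yc = 0.8 this marks roughly the top 20% of samples as positive, so
    # the classes are imbalanced about 4:1.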

    def train_model(self):
        """
        Train the model and save it to disk.
        :return:
        """
        X_train, X_test = self.generate_x_data()
        Y_train, Y_test = self.generate_y_data()
        train_data = lgb.Dataset(
            X_train,
            label=Y_train,
            categorical_feature=self.str_columns,
        )
        test_data = lgb.Dataset(X_test, label=Y_test, reference=train_data)
        params = {
            "objective": "binary",  # binary classification task
            "metric": "binary_logloss",  # evaluation metric: binary log loss
            "num_leaves": 36,  # number of leaves
            "learning_rate": 0.08479152931388902,  # learning rate
            "bagging_fraction": 0.6588121592044218,  # row subsampling ratio per tree
            "feature_fraction": 0.4572757903437793,  # feature subsampling ratio per tree
            "bagging_freq": 2,  # perform bagging every k iterations
            "num_threads": 16,  # number of threads
            "min_child_samples": 71,  # fixed typo: was "mini_child_samples"
        }
        # Train the model.
        num_round = 100
        print("Training started......")
        bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
        bst.save_model(self.model)
        print("Model training complete ✅")

    def evaluate_model(self):
        """
        Evaluate model performance on the held-out prediction set.
        :return:
        """
        fw = open("summary_tag_04.txt", "a+", encoding="utf-8")
        # Test features
        with open("produce_data/x_data_total_return_predict.json") as f1:
            x_list = json.loads(f1.read())
        # Test labels
        with open("produce_data/y_data_total_return_predict.json") as f2:
            Y_test = json.loads(f2.read())
        Y_test = [0 if i <= 26 else 1 for i in Y_test]  # hard-coded label threshold
        X_test = pd.DataFrame(x_list, columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        bst = lgb.Booster(model_file=self.model)
        y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
        # Convert scores to binary output with a hard-coded decision threshold.
        y_pred_binary = [0 if i <= 0.147637 else 1 for i in list(y_pred)]
        score_list = []
        for index, item in enumerate(list(y_pred)):
            real_label = Y_test[index]
            score = item
            pred_label = y_pred_binary[index]
            print(real_label, "\t", pred_label, "\t", score)
            fw.write("{}\t{}\t{}\n".format(real_label, pred_label, score))
            score_list.append(score)
        print("Total predicted samples: {}".format(len(score_list)))
        data_series = pd.Series(score_list)
        print("Score statistics")
        print(data_series.describe())
        # Evaluate the model.
        accuracy = accuracy_score(Y_test, y_pred_binary)
        print(f"Accuracy: {accuracy}")
        fw.close()
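    # Note: evaluation binarizes labels at a hard-coded 26 and scores at
    # 0.147637, while training derives its label threshold from the self.yc
    # quantile; these constants presumably come from a prior tuning run.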

    def feature_importance(self):
        """
        Get the importance of each feature.
        :return:
        """
        lgb_model = lgb.Booster(model_file=self.model)
        importance = lgb_model.feature_importance(importance_type="split")
        feature_name = lgb_model.feature_name()
        feature_importance = sorted(zip(feature_name, importance), key=lambda x: x[1], reverse=True)
        # Print feature importances.
        for name, imp in feature_importance:
            print(name, imp)


if __name__ == "__main__":
    L = LightGBM()
    # study = optuna.create_study(direction='maximize')
    # study.optimize(L.bays_params, n_trials=100)
    # print('Number of finished trials:', len(study.trials))
    # print('Best trial:', study.best_trial.params)
    L.train_model()
    L.evaluate_model()
    L.feature_importance()