main.py

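"""
Train and evaluate a LightGBM binary classifier.

Expected inputs (inferred from how the files are read below):
  * produce_data/x_data_total_return_*.json - a list of feature rows, one per
    sample, in the column order given by LightGBM.my_c
  * produce_data/y_data_total_return_*.json - a list of raw numeric target
    values, binarized against a quantile threshold before training
"""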
import json
import os
import sys

# Add the current working directory to the module search path.
sys.path.append(os.getcwd())

import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder


class LightGBM(object):
    """
    LightGBM model for classification
    """

    def __init__(self):
        self.label_encoder = LabelEncoder()
        # Full feature column list, in the order the rows appear in the JSON input.
        self.my_c = [
            "uid",
            "type",
            "channel",
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_user_id",
            "mode",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
            "tag1",
            "tag2",
            "tag3",
        ]
        # Categorical (string) columns and numeric columns.
        self.str_columns = ["uid", "type", "channel", "mode", "out_user_id", "tag1", "tag2", "tag3"]
        self.float_columns = [
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
        ]
        self.split_c = 0.98  # train/test split ratio
        self.yc = 0.8        # quantile used to binarize the target
        self.model = "lightgbm_tag_train_01.bin"

    def generate_x_data(self):
        """
        Generate the feature matrices for training and testing
        :return: X_train, X_test
        """
        with open("produce_data/x_data_total_return_train.json") as f1:
            x_list = json.loads(f1.read())
        index_t = int(len(x_list) * self.split_c)
        X_train = pd.DataFrame(x_list[:index_t], columns=self.my_c)
        for key in self.str_columns:
            # NOTE: the encoder is refit per column and per split, so the
            # integer codes are not shared between X_train and X_test.
            X_train[key] = self.label_encoder.fit_transform(X_train[key])
        for key in self.float_columns:
            X_train[key] = pd.to_numeric(X_train[key], errors="coerce")
        X_test = pd.DataFrame(x_list[index_t:], columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        return X_train, X_test

    def generate_y_data(self):
        """
        Generate the binary labels for training and testing
        :return: y_train, y_test
        """
        with open("produce_data/y_data_total_return_train.json") as f2:
            y_list = json.loads(f2.read())
        index_t = int(len(y_list) * self.split_c)
        # Use the self.yc quantile of the raw target as the threshold (yuzhi).
        temp = sorted(y_list)
        yuzhi = temp[int(len(temp) * self.yc) - 1]
        print("Threshold: {}".format(yuzhi))
        y__list = [0 if i <= yuzhi else 1 for i in y_list]
        y_train = np.array(y__list[:index_t])
        y_test = np.array(y__list[index_t:])
        return y_train, y_test
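
    # Worked example of the binarization above: with yc = 0.8 and
    # y_list = [1, 5, 2, 9, 3], temp = [1, 2, 3, 5, 9] and
    # yuzhi = temp[int(5 * 0.8) - 1] = temp[3] = 5, so the labels become
    # [0, 0, 0, 1, 0] -- only values strictly above the threshold map to 1.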

    def train_model(self):
        """
        Train the LightGBM model and save it to disk
        :return:
        """
        X_train, X_test = self.generate_x_data()
        Y_train, Y_test = self.generate_y_data()
        train_data = lgb.Dataset(
            X_train,
            label=Y_train,
            categorical_feature=self.str_columns,
        )
        test_data = lgb.Dataset(X_test, label=Y_test, reference=train_data)
        params = {
            "objective": "binary",       # binary classification task
            "metric": "binary_logloss",  # evaluation metric: binary log loss
            "num_leaves": 31,            # number of leaves per tree
            "learning_rate": 0.01,       # learning rate
            "bagging_fraction": 0.9,     # fraction of samples used to build each tree
            "feature_fraction": 0.8,     # fraction of features used to build each tree
            "bagging_freq": 5,           # perform bagging every k iterations
            "num_threads": 4,            # number of threads
        }
        # Train the model
        num_round = 500
        print("Start training...")
        bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
        bst.save_model(self.model)
        print("Model training finished ✅")

    def evaluate_model(self):
        """
        Evaluate model performance on the prediction set
        :return:
        """
        # Test data
        with open("produce_data/x_data_total_return_predict.json") as f1:
            x_list = json.loads(f1.read())
        # Test labels, binarized with a fixed threshold (26)
        with open("produce_data/y_data_total_return_predict.json") as f2:
            Y_test = json.loads(f2.read())
        Y_test = [0 if i <= 26 else 1 for i in Y_test]
        X_test = pd.DataFrame(x_list, columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        bst = lgb.Booster(model_file=self.model)
        y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
        # Convert the predicted scores to binary output with a fixed cut-off
        y_pred_binary = [0 if i <= 0.158550 else 1 for i in list(y_pred)]
        score_list = []
        with open("summary_tag_01.txt", "a+", encoding="utf-8") as fw:
            for index, item in enumerate(list(y_pred)):
                real_label = Y_test[index]
                score = item
                pred_label = y_pred_binary[index]
                print(real_label, "\t", pred_label, "\t", score)
                fw.write("{}\t{}\t{}\n".format(real_label, pred_label, score))
                score_list.append(score)
        print("Total number of prediction samples: {}".format(len(score_list)))
        data_series = pd.Series(score_list)
        print("Score statistics")
        print(data_series.describe())
        # Evaluate the model
        accuracy = accuracy_score(Y_test, y_pred_binary)
        print(f"Accuracy: {accuracy}")

    def feature_importance(self):
        """
        Get the importance of each feature
        :return:
        """
        lgb_model = lgb.Booster(model_file=self.model)
        importance = lgb_model.feature_importance(importance_type="split")
        feature_name = lgb_model.feature_name()
        feature_importance = sorted(zip(feature_name, importance), key=lambda x: x[1], reverse=True)
        # Print feature importances, highest first
        for name, imp in feature_importance:
            print(name, imp)


if __name__ == "__main__":
    L = LightGBM()
    # L.train_model()
    L.evaluate_model()
    # L.feature_importance()
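
# Typical workflow (a sketch; the produce_data/*.json inputs are assumed to exist):
#   1. L.train_model()         - fit on the *_train.json files and save lightgbm_tag_train_01.bin
#   2. L.evaluate_model()      - score the *_predict.json files and append results to summary_tag_01.txt
#   3. L.feature_importance()  - print split-based feature importances from the saved model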