main.py

import os
import sys
import json

sys.path.append(os.getcwd())

import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score


class LightGBM(object):
    """
    LightGBM model for classification.
    """

    def __init__(self):
        self.label_encoder = LabelEncoder()
        # Full column order expected in the input JSON rows
        self.my_c = [
            "uid",
            "type",
            "channel",
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_user_id",
            "mode",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
        ]
        # Categorical (string) columns that get label-encoded
        self.str_columns = ["uid", "type", "channel", "mode", "out_user_id"]
        # Numeric columns coerced to float
        self.float_columns = [
            "fans",
            "view_count_user_30days",
            "share_count_user_30days",
            "return_count_user_30days",
            "rov_user",
            "str_user",
            "out_play_cnt",
            "out_like_cnt",
            "out_share_cnt",
            "out_collection_cnt",
        ]
        self.split_c = 0.95  # train/validation split ratio
        self.yc = 0.8  # quantile used to binarize the continuous target
        self.model = "lightgbm_train.bin"  # path of the saved model file

    def generate_x_data(self):
        """
        Generate feature data and split it into train/validation sets.
        :return: (X_train, X_test) DataFrames
        """
        with open("whole_data/x_data_total_return.json") as f1:
            x_list = json.loads(f1.read())
        index_t = int(len(x_list) * self.split_c)
        X_train = pd.DataFrame(x_list[:index_t], columns=self.my_c)
        for key in self.str_columns:
            # Note: the encoder is re-fit per column (and again on the validation split below)
            X_train[key] = self.label_encoder.fit_transform(X_train[key])
        for key in self.float_columns:
            X_train[key] = pd.to_numeric(X_train[key], errors="coerce")
        X_test = pd.DataFrame(x_list[index_t:], columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        return X_train, X_test

    def generate_y_data(self):
        """
        Generate label data.
        :return: (y_train, y_test) arrays of binary labels
        """
        with open("whole_data/y_data_total_return.json") as f2:
            y_list = json.loads(f2.read())
        index_t = int(len(y_list) * self.split_c)
        # Binarize the continuous target at the self.yc quantile
        temp = sorted(y_list)
        yuzhi = temp[int(len(temp) * self.yc) - 1]
        print("Threshold: {}".format(yuzhi))
        y__list = [0 if i <= yuzhi else 1 for i in y_list]
        y_train = np.array(y__list[:index_t])
        y_test = np.array(y__list[index_t:])
        return y_train, y_test

    def train_model(self):
        """
        Train the LightGBM model and save it to disk.
        :return:
        """
        X_train, X_test = self.generate_x_data()
        Y_train, Y_test = self.generate_y_data()
        train_data = lgb.Dataset(
            X_train,
            label=Y_train,
            categorical_feature=["uid", "type", "channel", "mode", "out_user_id"],
        )
        test_data = lgb.Dataset(X_test, label=Y_test, reference=train_data)
        params = {
            "objective": "binary",  # binary classification task
            "metric": "binary_logloss",  # evaluation metric: binary log loss
            "num_leaves": 31,  # number of leaves per tree
            "learning_rate": 0.05,  # learning rate
            "bagging_fraction": 0.9,  # fraction of samples used per tree
            "feature_fraction": 0.8,  # fraction of features used per tree
            "bagging_freq": 5,  # perform bagging every k iterations
            "num_threads": 4,  # number of threads
        }
        # Train the model
        num_round = 100
        print("Training started...")
        bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
        bst.save_model(self.model)
        print("Model training finished ✅")

    def evaluate_model(self):
        """
        Evaluate model performance on the held-out prediction set.
        :return:
        """
        fw = open("summary.txt", "a+", encoding="utf-8")
        # Test features
        with open("whole_data/x_data_total_return_prid.json") as f1:
            x_list = json.loads(f1.read())
        # Test labels, binarized with a hard-coded threshold of 26
        with open("whole_data/y_data_total_return_prid.json") as f2:
            Y_test = json.loads(f2.read())
        Y_test = [0 if i <= 26 else 1 for i in Y_test]
        X_test = pd.DataFrame(x_list, columns=self.my_c)
        for key in self.str_columns:
            X_test[key] = self.label_encoder.fit_transform(X_test[key])
        for key in self.float_columns:
            X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
        bst = lgb.Booster(model_file=self.model)
        y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
        # Convert scores to binary predictions with a hard-coded decision threshold
        y_pred_binary = [0 if i <= 0.1613 else 1 for i in list(y_pred)]
        score_list = []
        for index, item in enumerate(list(y_pred)):
            real_label = Y_test[index]
            score = item
            prid_label = y_pred_binary[index]
            print(real_label, "\t", prid_label, "\t", score)
            fw.write("{}\t{}\t{}\n".format(real_label, prid_label, score))
            score_list.append(score)
        print("Total predicted samples: {}".format(len(score_list)))
        data_series = pd.Series(score_list)
        print("Score statistics:")
        print(data_series.describe())
        # Evaluate the model
        accuracy = accuracy_score(Y_test, y_pred_binary)
        print(f"Accuracy: {accuracy}")
        fw.close()


if __name__ == "__main__":
    L = LightGBM()
    # L.train_model()
    L.evaluate_model()