# main.py — train a LightGBM binary classifier on JSON feature/label data
# stored under whole_data/ (70/30 positional train/test split).
  1. import os
  2. import sys
  3. import json
  4. sys.path.append(os.getcwd())
  5. import numpy as np
  6. import pandas as pd
  7. import lightgbm as lgb
  8. from sklearn.model_selection import train_test_split
  9. from sklearn.datasets import make_classification
  10. from sklearn.preprocessing import LabelEncoder
  11. from sklearn.metrics import accuracy_score
# Shared LabelEncoder instance, re-fit once per string column below.
# NOTE(review): because the SAME instance is re-fit for every column (and
# again for the test split), each fit overwrites the previous mapping —
# see the encoding loops further down.
label_encoder = LabelEncoder()
  13. my_c = [
  14. "uid",
  15. "type",
  16. "channel",
  17. "fans",
  18. "view_count_user_30days",
  19. "share_count_user_30days",
  20. "return_count_user_30days",
  21. "rov_user",
  22. "str_user",
  23. "out_user_id",
  24. "mode",
  25. "out_play_cnt",
  26. "out_like_cnt",
  27. "out_share_cnt",
  28. "out_collection_cnt"
  29. ]
  30. str_cols = ["uid", "type", "channel", "mode"]
  31. float_cols = [
  32. "fans",
  33. "view_count_user_30days",
  34. "share_count_user_30days",
  35. "return_count_user_30days",
  36. "rov_user",
  37. "str_user",
  38. "out_user_id",
  39. "out_play_cnt",
  40. "out_like_cnt",
  41. "out_share_cnt",
  42. "out_collection_cnt"
  43. ]
  44. with open("whole_data/x_data.json") as f1:
  45. x_list = json.loads(f1.read())
  46. index_t = int(len(x_list) * 0.7)
  47. X_train = pd.DataFrame(x_list[:index_t], columns=my_c)
  48. for key in str_cols:
  49. X_train[key] = label_encoder.fit_transform(X_train[key])
  50. for key in float_cols:
  51. X_train[key] = pd.to_numeric(X_train[key], errors='coerce')
  52. X_test = pd.DataFrame(x_list[index_t:], columns=my_c)
  53. for key in str_cols:
  54. X_test[key] = label_encoder.fit_transform(X_test[key])
  55. for key in float_cols:
  56. X_test[key] = pd.to_numeric(X_test[key], errors='coerce')
  57. with open("whole_data/y_data.json") as f2:
  58. y_list = json.loads(f2.read())
  59. index_t = int(len(y_list) * 0.7)
  60. temp = sorted(y_list)
  61. yuzhi = temp[int(len(temp) * 0.8)-1]
  62. y__list = [0 if i <= yuzhi else 1 for i in y_list]
  63. y_train = np.array(y__list[:index_t])
  64. y_test = np.array(y__list[index_t:])
  65. # 创建LightGBM数据集
  66. train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=['uid', 'type', 'channel', 'mode'])
  67. test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
  68. # 设置模型的参数
  69. params = {
  70. 'objective': 'binary', # 指定二分类任务
  71. 'metric': 'binary_logloss', # 评估指标为二分类的log损失
  72. 'num_leaves': 31, # 叶子节点数
  73. 'learning_rate': 0.05, # 学习率
  74. 'bagging_fraction': 0.9, # 建树的样本采样比例
  75. 'feature_fraction': 0.8, # 建树的特征选择比例
  76. 'bagging_freq': 5, # k 意味着每 k 次迭代执行bagging
  77. }
  78. # 训练模型
  79. num_round = 1000
  80. bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
  81. # 预测
  82. y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
  83. # 转换为二进制输出
  84. y_pred_binary = np.where(y_pred > 0.5, 1, 0)
  85. # 评估模型
  86. accuracy = accuracy_score(y_test, y_pred_binary)
  87. print(f'Accuracy: {accuracy}')