main.py 2.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293
  1. import os
  2. import sys
  3. import json
  4. sys.path.append(os.getcwd())
  5. import numpy as np
  6. import pandas as pd
  7. import lightgbm as lgb
  8. from sklearn.model_selection import train_test_split
  9. from sklearn.datasets import make_classification
  10. from sklearn.preprocessing import LabelEncoder
  11. from sklearn.metrics import accuracy_score
# Shared encoder mapping string categorical columns to integer codes.
# NOTE(review): the commented-out pipeline below calls fit_transform
# separately on the train and test frames, which assigns inconsistent
# codes to the same category — fit once on train, then transform test.
label_encoder = LabelEncoder()
  13. my_c = [
  14. "uid",
  15. "type",
  16. "channel",
  17. "fans",
  18. "view_count_user_30days",
  19. "share_count_user_30days",
  20. "return_count_user_30days",
  21. "rov_user",
  22. "str_user",
  23. "out_user_id",
  24. "mode",
  25. "out_play_cnt",
  26. "out_like_cnt",
  27. "out_share_cnt",
  28. "out_collection_cnt"
  29. ]
  30. str_cols = ["uid", "type", "channel", "mode"]
  31. float_cols = [
  32. "fans",
  33. "view_count_user_30days",
  34. "share_count_user_30days",
  35. "return_count_user_30days",
  36. "rov_user",
  37. "str_user",
  38. "out_user_id",
  39. "out_play_cnt",
  40. "out_like_cnt",
  41. "out_share_cnt",
  42. "out_collection_cnt"
  43. ]
  44. with open("whole_data/x_data.json") as f1:
  45. x_list = json.loads(f1.read())
  46. print(len(x_list))
  47. # X_train = pd.DataFrame(x_list[:15000], columns=my_c)
  48. # for key in str_cols:
  49. # X_train[key] = label_encoder.fit_transform(X_train[key])
  50. # for key in float_cols:
  51. # X_train[key] = pd.to_numeric(X_train[key], errors='coerce')
  52. # X_test = pd.DataFrame(x_list[15000:], columns=my_c)
  53. # for key in str_cols:
  54. # X_test[key] = label_encoder.fit_transform(X_test[key])
  55. # for key in float_cols:
  56. # X_test[key] = pd.to_numeric(X_test[key], errors='coerce')
  57. #
  58. #
  59. # with open("whole_data/y_data.json") as f2:
  60. # y_list = json.loads(f2.read())
  61. # y__list = [0 if i <= 25 else 1 for i in y_list]
  62. # y_train = np.array(y__list[:15000])
  63. # y_test = np.array(y__list[15000:])
  64. #
# # Create the LightGBM training/validation datasets
  66. # train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=['uid', 'type', 'channel', 'mode'])
  67. # test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
  68. #
# # Set the model parameters
# params = {
#     'objective': 'binary',         # binary classification task
#     'metric': 'binary_logloss',    # evaluation metric: binary log loss
#     'num_leaves': 31,              # maximum number of leaves per tree
#     'learning_rate': 0.05,         # learning rate
#     'bagging_fraction': 0.9,       # row (sample) subsampling ratio per tree
#     'feature_fraction': 0.8,       # feature subsampling ratio per tree
#     'bagging_freq': 5,             # k means perform bagging every k iterations
# }
# # Train the model
  80. # num_round = 100
  81. # bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
  82. #
# # Predict
  84. # y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
# # Convert probabilities to binary (0/1) labels
  86. # y_pred_binary = np.where(y_pred > 0.5, 1, 0)
  87. #
# # Evaluate the model
  89. # accuracy = accuracy_score(y_test, y_pred_binary)
  90. # print(f'Accuracy: {accuracy}')