# main.py

  1. import numpy as np
  2. import lightgbm as lgb
  3. from sklearn.model_selection import train_test_split
  4. from sklearn.datasets import make_classification
  5. from sklearn.metrics import accuracy_score
  6. # 生成模拟数据
  7. X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)
  8. # 分割数据集
  9. X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
  10. # 创建LightGBM数据集
  11. train_data = lgb.Dataset(X_train, label=y_train)
  12. test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
  13. print(X_train.shape)
  14. for line in X_train:
  15. print(line)
  16. # print(X_train)
  17. print(y_test)
  18. # 设置模型的参数
  19. params = {
  20. 'objective': 'binary', # 指定二分类任务
  21. 'metric': 'binary_logloss', # 评估指标为二分类的log损失
  22. 'num_leaves': 31, # 叶子节点数
  23. 'learning_rate': 0.05, # 学习率
  24. 'bagging_fraction': 0.9, # 建树的样本采样比例
  25. 'feature_fraction': 0.8, # 建树的特征选择比例
  26. 'bagging_freq': 5, # k 意味着每 k 次迭代执行bagging
  27. }
  28. # 训练模型
  29. num_round = 100
  30. bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
  31. # 预测
  32. y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
  33. # 转换为二进制输出
  34. y_pred_binary = np.where(y_pred > 0.5, 1, 0)
  35. # 评估模型
  36. accuracy = accuracy_score(y_test, y_pred_binary)
  37. print(f'Accuracy: {accuracy}')