# main.py — LightGBM binary-classification demo on synthetic data.
  1. import numpy as np
  2. import lightgbm as lgb
  3. from sklearn.model_selection import train_test_split
  4. from sklearn.datasets import make_classification
  5. from sklearn.metrics import accuracy_score
  6. # 生成模拟数据
  7. X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)
  8. # 分割数据集
  9. X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
  10. # 创建LightGBM数据集
  11. train_data = lgb.Dataset(X_train, label=y_train)
  12. test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
  13. print(test_data)
  14. print(y_test)
  15. # 设置模型的参数
  16. params = {
  17. 'objective': 'binary', # 指定二分类任务
  18. 'metric': 'binary_logloss', # 评估指标为二分类的log损失
  19. 'num_leaves': 31, # 叶子节点数
  20. 'learning_rate': 0.05, # 学习率
  21. 'bagging_fraction': 0.9, # 建树的样本采样比例
  22. 'feature_fraction': 0.8, # 建树的特征选择比例
  23. 'bagging_freq': 5, # k 意味着每 k 次迭代执行bagging
  24. }
  25. # 训练模型
  26. num_round = 100
  27. bst = lgb.train(params, train_data, num_round, valid_sets=[test_data])
  28. # 预测
  29. y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
  30. # 转换为二进制输出
  31. y_pred_binary = np.where(y_pred > 0.5, 1, 0)
  32. # 评估模型
  33. accuracy = accuracy_score(y_test, y_pred_binary)
  34. print(f'Accuracy: {accuracy}')