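"""Train a LightGBM binary classifier on user/content engagement features.

The target is derived from a total-return count: samples at or below the
80th percentile are labeled 0 and the rest 1, so the model predicts whether
a sample falls in the top ~20% by return count.
"""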
import os
import sys
import json

sys.path.append(os.getcwd())

import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
my_c = [
    "uid",
    "type",
    "channel",
    "fans",
    "view_count_user_30days",
    "share_count_user_30days",
    "return_count_user_30days",
    "rov_user",
    "str_user",
    "out_user_id",
    "mode",
    "out_play_cnt",
    "out_like_cnt",
    "out_share_cnt",
    "out_collection_cnt",
]
str_cols = ["uid", "type", "channel", "mode", "out_user_id"]
float_cols = [
    "fans",
    "view_count_user_30days",
    "share_count_user_30days",
    "return_count_user_30days",
    "rov_user",
    "str_user",
    "out_play_cnt",
    "out_like_cnt",
    "out_share_cnt",
    "out_collection_cnt",
]
# One LabelEncoder per categorical column: a single shared encoder that is
# re-fit on the test split would assign codes inconsistent with training.
encoders = {col: LabelEncoder() for col in str_cols}
with open("whole_data/x_data_total_return.json") as f1:
    x_list = json.loads(f1.read())[30000:230000]
print(len(x_list))
index_t = int(len(x_list) * 0.7)

X_train = pd.DataFrame(x_list[:index_t], columns=my_c)
for key in str_cols:
    X_train[key] = encoders[key].fit_transform(X_train[key].astype(str))
for key in float_cols:
    X_train[key] = pd.to_numeric(X_train[key], errors="coerce")

X_test = pd.DataFrame(x_list[index_t:], columns=my_c)
for key in str_cols:
    # Reuse the train-time encoding; categories unseen during training map
    # to -1, which LightGBM treats as missing for categorical features.
    mapping = {cls: code for code, cls in enumerate(encoders[key].classes_)}
    X_test[key] = X_test[key].astype(str).map(mapping).fillna(-1).astype(int)
for key in float_cols:
    X_test[key] = pd.to_numeric(X_test[key], errors="coerce")
print("X data loaded successfully!")
with open("whole_data/y_data_total_return.json") as f2:
    y_list = json.loads(f2.read())[30000:230000]
print(len(y_list))
assert len(y_list) == len(x_list), "x/y samples must be paired by index"
index_t = int(len(y_list) * 0.7)
# Binarize the target at the 80th percentile of the return counts, so
# roughly the top 20% of samples form the positive class. Note the cutoff
# is computed over train + test; using only y_list[:index_t] would avoid
# leaking test information into the label definition.
temp = sorted(y_list)
yuzhi = temp[int(len(temp) * 0.8) - 1]  # yuzhi: the cutoff value
y__list = [0 if i <= yuzhi else 1 for i in y_list]
y_train = np.array(y__list[:index_t])
y_test = np.array(y__list[index_t:])
print("y data loaded successfully!")
# Create the LightGBM datasets; the encoded string columns are declared categorical
train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=str_cols)
test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
print("Datasets created successfully")
# Model parameters
# Earlier parameter set, kept for reference:
# params = {
#     'objective': 'binary',        # binary classification task
#     'metric': 'binary_logloss',   # evaluate with binary log loss
#     'num_leaves': 31,             # number of leaves per tree
#     'learning_rate': 0.05,        # learning rate
#     'bagging_fraction': 0.9,      # row subsampling ratio for tree building
#     'feature_fraction': 0.8,      # feature subsampling ratio for tree building
#     'bagging_freq': 5,            # k means perform bagging every k iterations
# }
params = {
    'objective': 'binary',
    'metric': 'binary_logloss',
    'num_leaves': 20,           # fewer leaves than before
    'learning_rate': 0.1,       # larger learning rate
    'bagging_fraction': 0.95,   # slightly higher row subsampling ratio
    'feature_fraction': 0.95,   # slightly higher feature subsampling ratio
    'bagging_freq': 0,          # 0 disables bagging entirely
    # LightGBM builds trees from histograms by default; 'hist' is not a
    # valid 'boosting_type' (valid values include 'gbdt', 'dart', 'rf').
}
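# The labels above split roughly 80/20, so the positive class is
# underrepresented. A hedged option, not enabled here, is LightGBM's
# built-in reweighting:
#     params['is_unbalance'] = True        # or, equivalently:
#     params['scale_pos_weight'] = 4.0     # ~ n_negative / n_positive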
# Train the model
num_round = 100
print("Starting training......")
bst = lgb.train(
    params,
    train_data,
    num_round,
    valid_sets=[test_data],
    # Early stopping makes bst.best_iteration meaningful for predict() below
    callbacks=[lgb.early_stopping(stopping_rounds=10)],
)
print("Training finished! Starting prediction......")
# Predict probabilities for the held-out split
y_pred = bst.predict(X_test, num_iteration=bst.best_iteration)
# Binarize with a 0.7 cutoff (stricter than the usual 0.5, trading
# positive-class recall for precision)
y_pred_binary = np.where(y_pred > 0.7, 1, 0)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred_binary)
print(f'Accuracy: {accuracy}')
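# Accuracy alone is misleading here: with the 80/20 label split, predicting
# all zeros already scores about 0.8. A minimal sketch of threshold-free and
# per-class metrics (the model file name below is illustrative):
from sklearn.metrics import classification_report, roc_auc_score

print(f"AUC: {roc_auc_score(y_test, y_pred)}")
print(classification_report(y_test, y_pred_binary, digits=3))

# Persist the booster for later reuse
bst.save_model("lgb_total_return.txt")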