Ver Fonte

data-to-local

罗俊辉 há 1 ano atrás
pai
commit
045d4a78e5
1 ficheiros alterados com 9 adições e 11 exclusões
  1. 9 11
      main_spider.py

+ 9 - 11
main_spider.py

@@ -54,7 +54,7 @@ class LightGBM(object):
         self.model = "models/lightgbm_0401_spider.bin"
         self.flag = flag
         self.dt = dt
-        self.label_mapping = {}
+        # self.label_mapping = {}
 
     def read_data(self, path, yc=None):
         """
@@ -64,8 +64,6 @@ class LightGBM(object):
         df = pd.read_json(path)
         df = df.dropna(subset=['label'])  # 把 label 为空的删掉
         labels = df['label']
-        video_ids = df['video_id']
-        video_titles = df['video_title']
         if not yc:
             temp = sorted(labels)
             yc = temp[int(len(temp) * 0.7)]
@@ -76,12 +74,12 @@ class LightGBM(object):
             features[key] = pd.to_numeric(features[key], errors="coerce")
         for key in self.str_columns:
             features[key] = self.label_encoder.fit_transform(features[key])
-            self.label_mapping[key] = dict(zip(self.label_encoder.classes_, self.label_encoder.transform(self.label_encoder.classes_)))
-        return features, labels, video_ids, video_titles
+            # self.label_mapping[key] = dict(zip(self.label_encoder.classes_, self.label_encoder.transform(self.label_encoder.classes_)))
+        return features, labels, df
 
     def best_params(self):
         path = "data/train_data/spider_data_240401.json"
-        X, y, ids, titles = self.read_data(path)
+        X, y, ori_df = self.read_data(path)
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
 
         lgbm = lgb.LGBMClassifier(objective='binary')
@@ -125,7 +123,7 @@ class LightGBM(object):
         :return:
         """
         path = "data/train_data/spider_train_20240402.json"
-        x, y, ids, titles = self.read_data(path)
+        x, y, ori_df = self.read_data(path)
         train_size = int(len(x) * self.split_c)
         X_train, X_test = x[:train_size], x[train_size:]
         Y_train, Y_test = y[:train_size], y[train_size:]
@@ -159,7 +157,7 @@ class LightGBM(object):
         """
         fw = open("result/summary_{}.txt".format(dt), "a+", encoding="utf-8")
         path = 'data/predict_data/predict_{}.json'.format(dt)
-        x, y, ids, titles = self.read_data(path, yc=6)
+        x, y, ori_df = self.read_data(path, yc=6)
         true_label_df = pd.DataFrame(list(y), columns=['ture_label'])
         bst = lgb.Booster(model_file=self.model)
         y_pred = bst.predict(x, num_iteration=bst.best_iteration)
@@ -185,9 +183,9 @@ class LightGBM(object):
         print(f"Accuracy: {accuracy}")
         fw.close()
         # 水平合并
-        df_concatenated = pd.concat([ids, titles, x, true_label_df, pred_score_df, pred_label_df], axis=1)
-        for key in self.str_columns:
-            df_concatenated[key] = [self.label_mapping[key][i] for i in df_concatenated[key]]
+        df_concatenated = pd.concat([ori_df, true_label_df, pred_score_df, pred_label_df], axis=1)
+        # for key in self.str_columns:
+        #     df_concatenated[key] = [self.label_mapping[key][i] for i in df_concatenated[key]]
         df_concatenated.to_excel("data/predict_data/spider_predict_result_{}.xlsx".format(dt), index=False)
 
     def feature_importance(self):