
add data monitor

liqian 3 years ago
parent commit bcbaa40847
2 changed files with 97 additions and 2 deletions
  1. data_monitor.py (+95 −0)
  2. rov_train.py (+2 −2)

+ 95 - 0
data_monitor.py

@@ -0,0 +1,95 @@
+# Monitor the distribution of the training data
+import numpy as np
+import pandas as pd
+import datetime
+
+from config import set_config
+from rov_train import process_data, process_predict_data
+
+config_, env = set_config()
+
+
+def get_feature_distribution(feature_name, feature_data):
+    statistical_results = {'feature_name': feature_name}
+    feature_data = np.array(feature_data)
+    feature_data_sorted = sorted(feature_data)
+    length = len(feature_data_sorted)
+    count_0 = len([item for item in feature_data_sorted if item == 0])
+    print('data_count = {}, count_0 = {}, rate_0 = {}'.format(length, count_0, count_0/length))
+    statistical_results['data_count'] = length
+    statistical_results['0_count'] = count_0
+    statistical_results['0_rate'] = count_0/length
+
+    # Distribution of the full data
+    for percentile in [0.25, 0.5, 0.75, 1]:
+        data_count = int(length * percentile)
+        data = feature_data_sorted[:data_count + 1]
+        data_mean = np.mean(data)
+        data_var = np.var(data)
+        data_std = np.std(data)
+        # print('percentile = {}, data_count = {}, mean = {}, var = {}, std = {}'.format(
+        #     percentile, data_count, data_mean, data_var, data_std))
+        statistical_results['mean_{}'.format(percentile)] = data_mean
+        statistical_results['var_{}'.format(percentile)] = data_var
+        statistical_results['std_{}'.format(percentile)] = data_std
+
+    # Distribution of the non-zero data
+    data_non_zero = [item for item in feature_data_sorted if item != 0]
+    for percentile in [0.25, 0.5, 0.75, 1]:
+        data_count = int(len(data_non_zero) * percentile)
+        data = data_non_zero[:data_count + 1]
+        data_mean = np.mean(data)
+        data_var = np.var(data)
+        data_std = np.std(data)
+        # print('percentile = {}, data_count = {}, mean = {}, var = {}, std = {}'.format(
+        #     percentile, data_count, data_mean, data_var, data_std))
+        statistical_results['non_zero_mean_{}'.format(percentile)] = data_mean
+        statistical_results['non_zero_var_{}'.format(percentile)] = data_var
+        statistical_results['non_zero_std_{}'.format(percentile)] = data_std
+
+    return statistical_results
+
+
+def all_feature_distribution(data, file):
+    res = []
+    columns = [
+        'feature_name', 'data_count', '0_count', '0_rate',
+        'mean_0.25', 'mean_0.5', 'mean_0.75', 'mean_1',
+        'var_0.25', 'var_0.5', 'var_0.75', 'var_1',
+        'std_0.25', 'std_0.5', 'std_0.75', 'std_1',
+        'non_zero_mean_0.25', 'non_zero_mean_0.5', 'non_zero_mean_0.75', 'non_zero_mean_1',
+        'non_zero_var_0.25', 'non_zero_var_0.5', 'non_zero_var_0.75', 'non_zero_var_1',
+        'non_zero_std_0.25', 'non_zero_std_0.5', 'non_zero_std_0.75', 'non_zero_std_1'
+    ]
+    feature_importance = pd.read_csv('data/model_feature_importance.csv')
+    feature_name_list = list(feature_importance['feature'])
+    for feature_name in feature_name_list:
+        print(feature_name)
+        feature_data = data[feature_name]
+        statistical_results = get_feature_distribution(feature_name=feature_name, feature_data=feature_data)
+        res.append(statistical_results)
+    df = pd.DataFrame(res, columns=columns)
+    df.to_csv(file)
+
+
+def main():
+    now_date = datetime.datetime.strftime(datetime.datetime.today(), '%Y%m%d')
+    # now_date = '20220119'
+
+    # Training data
+    print('train data monitor...')
+    train_data_file = 'data/train_data_monitor_{}.csv'.format(now_date)
+    train_filename = config_.TRAIN_DATA_FILENAME
+    train_x, train_y, videos, fea = process_data(filename=train_filename)
+    all_feature_distribution(train_x, file=train_data_file)
+
+    # Prediction data
+    print('predict data monitor...')
+    predict_data_file = 'data/predict_data_monitor_{}.csv'.format(now_date)
+    predict_filename = config_.PREDICT_DATA_FILENAME
+    predict_x, video_ids = process_predict_data(filename=predict_filename)
+    all_feature_distribution(predict_x, file=predict_data_file)
+
+
+if __name__ == '__main__':
+    main()
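
For context, a minimal sketch (not part of the commit) of how the new helper could be exercised on a toy column. It assumes data_monitor.py and its config / rov_train imports resolve in the local environment; the feature name and values below are illustrative only.

    # Illustrative usage of get_feature_distribution on a hypothetical column.
    # Importing data_monitor also runs set_config() at module level, so this
    # only works where config and rov_train are importable.
    from data_monitor import get_feature_distribution

    toy_values = [0, 0, 1.5, 2.0, 4.5]  # hypothetical feature values
    stats = get_feature_distribution(feature_name='toy_feature', feature_data=toy_values)

    # Keys match the columns written by all_feature_distribution, e.g.
    # data_count, 0_count, 0_rate, mean_0.25 ... std_1, non_zero_mean_0.25 ... non_zero_std_1
    print(stats['data_count'], stats['0_rate'], stats['non_zero_mean_1'])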

+ 2 - 2
rov_train.py

@@ -46,7 +46,7 @@ def process_data(filename):
     x['stage_two_return_ratio'] = x['stage_two_return_added'] / x['stage_two_retrn']
 
     # Fill missing values with 0
-    x.fillna(0)
+    x.fillna(0, inplace=True)
 
     # 获取当前所使用的特征列表
     features = list(x)
@@ -85,7 +85,7 @@ def process_predict_data(filename):
     x['stage_two_return_ratio'] = x['stage_two_return_added'] / x['stage_two_retrn']
 
     # Fill missing values with 0
-    x.fillna(0)
+    x.fillna(0, inplace=True)
 
     return x, video_id_final
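
The rov_train.py change is a fix rather than a style tweak: DataFrame.fillna(0) returns a new frame and leaves x untouched, so any NaNs produced by the ratio columns (e.g. 0/0) would previously survive into the feature matrix. A minimal sketch, separate from the commit and using a throwaway frame, illustrating the difference:

    # Why inplace=True matters here: fillna without it does not modify x.
    import pandas as pd

    x = pd.DataFrame({'stage_two_return_added': [0.0, 2.0], 'stage_two_retrn': [0.0, 4.0]})
    x['stage_two_return_ratio'] = x['stage_two_return_added'] / x['stage_two_retrn']  # 0/0 -> NaN

    x.fillna(0)                    # returns a filled copy; x still contains NaN
    assert x['stage_two_return_ratio'].isna().any()

    x.fillna(0, inplace=True)      # mutates x, as the updated code does
    assert not x['stage_two_return_ratio'].isna().any()

An equivalent alternative would be reassignment, x = x.fillna(0).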