rov_train.py

import os
import random
import time

import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, r2_score, mean_absolute_percentage_error
from config import set_config
from utils import read_from_pickle, write_to_pickle, data_normalization, \
    request_post, filter_video_status, update_video_w_h_rate
from log import Log
from db_helper import RedisHelper, MysqlHelper

config_, env = set_config()
log_ = Log()


def process_data(filename):
    """
    Data cleaning and preprocessing.
    :param filename: pickle filename of a DataFrame
    :return: x, y, video_ids, features
    """
    # Load the data
    data = read_from_pickle(filename)
    # Build y, updating values where y <= 0 to 1
    data.loc[data['futre7dayreturn'] <= 0, 'futre7dayreturn'] = 1
    y = data['futre7dayreturn']
    # Video id column
    video_ids = data['videoid']
    # Build x
    drop_columns = ['videoid', 'dt', 'futre7dayreturn', 'videotags', 'words_without_tags']
    x = data.drop(columns=drop_columns)
    # Difference between each day's return count and the previous day's
    x['stage_four_return_added'] = x['stage_four_retrn'] - x['stage_three_retrn']
    x['stage_three_return_added'] = x['stage_three_retrn'] - x['stage_two_retrn']
    x['stage_two_return_added'] = x['stage_two_retrn'] - x['stage_one_retrn']
    # Growth rate of each day's return count over the previous day's
    x['stage_four_return_ratio'] = x['stage_four_return_added'] / x['stage_four_retrn']
    x['stage_three_return_ratio'] = x['stage_three_return_added'] / x['stage_three_retrn']
    x['stage_two_return_ratio'] = x['stage_two_return_added'] / x['stage_two_retrn']
    # Fill missing values with 0 (fillna is not in-place, so reassign)
    x = x.fillna(0)
    # The feature list currently in use
    features = list(x)
    return x, y, video_ids, features
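
# Note: the ratio features above divide by the same-stage return counts, so a
# nonzero numerator over a zero denominator yields inf/-inf in pandas (0/0 yields
# NaN); fillna(0) only catches the NaN case. A minimal cleanup sketch, assuming an
# extra numpy import is acceptable:
#
#     import numpy as np
#     x = x.replace([np.inf, -np.inf], 0)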


def process_predict_data(filename):
    """
    Cleaning and preprocessing for the prediction data.
    :param filename: pickle filename of a DataFrame
    :return: x, filtered_videos
    """
    # Load the data
    data = read_from_pickle(filename)
    # Video id column
    video_ids = data['videoid']
    # Filter by video status
    video_id_list = [int(video_id) for video_id in video_ids]
    filtered_videos = [str(item) for item in filter_video_status(video_ids=video_id_list)]
    data = data.loc[data['videoid'].isin(filtered_videos)]
    # Build x
    drop_columns = ['videoid', 'dt', 'futre7dayreturn', 'videotags', 'words_without_tags']
    x = data.drop(columns=drop_columns)
    # Difference between each day's return count and the previous day's
    x['stage_four_return_added'] = x['stage_four_retrn'] - x['stage_three_retrn']
    x['stage_three_return_added'] = x['stage_three_retrn'] - x['stage_two_retrn']
    x['stage_two_return_added'] = x['stage_two_retrn'] - x['stage_one_retrn']
    # Growth rate of each day's return count over the previous day's
    x['stage_four_return_ratio'] = x['stage_four_return_added'] / x['stage_four_retrn']
    x['stage_three_return_ratio'] = x['stage_three_return_added'] / x['stage_three_retrn']
    x['stage_two_return_ratio'] = x['stage_two_return_added'] / x['stage_two_retrn']
    # Fill missing values with 0 (fillna is not in-place, so reassign)
    x = x.fillna(0)
    return x, filtered_videos
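
# Note: the feature-engineering block above duplicates the one in process_data; a
# sketch of a shared helper (hypothetical name add_stage_return_features) that both
# functions could call instead:
#
#     def add_stage_return_features(x):
#         for hi, lo in [('four', 'three'), ('three', 'two'), ('two', 'one')]:
#             added = x['stage_{}_retrn'.format(hi)] - x['stage_{}_retrn'.format(lo)]
#             x['stage_{}_return_added'.format(hi)] = added
#             x['stage_{}_return_ratio'.format(hi)] = added / x['stage_{}_retrn'.format(hi)]
#         return x.fillna(0)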


def train(x, y, features):
    """
    Train the model.
    :param x: X
    :param y: Y
    :param features: feature list
    :return: None
    """
    # Train/test split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)
    log_.info('x_train shape: {}, y_train shape: {}'.format(x_train.shape, y_train.shape))
    log_.info('x_test shape: {}, y_test shape: {}'.format(x_test.shape, y_test.shape))
    # Training parameters
    params = {
        "objective": "regression",
        "reg_sqrt": True,
        "metric": "mape",
        "max_depth": -1,
        "num_leaves": 50,
        "learning_rate": 0.1,
        "bagging_fraction": 0.7,
        "feature_fraction": 0.7,
        "bagging_freq": 8,
        "bagging_seed": 2018,
        "lambda_l1": 0.11,
        "boosting": "dart",
        "nthread": 4,
        "verbosity": -1
    }
    # Build the datasets
    train_set = lgb.Dataset(data=x_train, label=y_train)
    test_set = lgb.Dataset(data=x_test, label=y_test)
    # Train the model
    evals_result = {}
    model = lgb.train(params=params, train_set=train_set, num_boost_round=5000,
                      valid_sets=[test_set], early_stopping_rounds=100,
                      verbose_eval=100, evals_result=evals_result)
    # Save the model's feature importances to csv
    feature_importance_data = {'feature': features, 'feature_importance': model.feature_importance()}
    feature_importance_filename = 'model_feature_importance.csv'
    pack_result_to_csv(filename=feature_importance_filename, sort_columns=['feature_importance'],
                       ascending=False, **feature_importance_data)
    # Predict on the test set
    pre_y_test = model.predict(data=x_test, num_iteration=model.best_iteration)
    y_test = y_test.values
    err_mape = mean_absolute_percentage_error(y_test, pre_y_test)
    r2 = r2_score(y_test, pre_y_test)
    # Save the test-set results to csv
    test_data = {'pre_y_test': pre_y_test, 'y_test': y_test}
    test_result_filename = 'test_result.csv'
    pack_result_to_csv(filename=test_result_filename, sort_columns=['pre_y_test'], ascending=False, **test_data)
    log_.info('err_mape={}, r2={}'.format(err_mape, r2))
    # Save the model
    write_to_pickle(data=model, filename=config_.MODEL_FILENAME)
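
# Two version notes on the lgb.train call above:
# - LightGBM's docs state that early stopping is not available in dart mode, so
#   early_stopping_rounds is effectively ignored with boosting="dart".
# - The early_stopping_rounds / verbose_eval / evals_result keyword arguments were
#   removed in LightGBM 4.x; a minimal equivalent sketch using callbacks:
#
#     model = lgb.train(params=params, train_set=train_set, num_boost_round=5000,
#                       valid_sets=[test_set],
#                       callbacks=[lgb.early_stopping(stopping_rounds=100),
#                                  lgb.log_evaluation(period=100),
#                                  lgb.record_evaluation(evals_result)])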


def pack_result_to_csv(filename, sort_columns=None, filepath=config_.DATA_DIR_PATH, ascending=True, **data):
    """
    Pack the data and save it to csv.
    :param filename: csv filename
    :param sort_columns: columns to sort by, type-list, default None
    :param filepath: directory for the csv file, default config_.DATA_DIR_PATH
    :param ascending: whether to sort the given columns in ascending order, default True
    :param data: the data, type-dict
    :return: None
    """
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    file = os.path.join(filepath, filename)
    df = pd.DataFrame(data=data)
    if sort_columns:
        df = df.sort_values(by=sort_columns, ascending=ascending)
    df.to_csv(file, index=False)
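
# Usage sketch for pack_result_to_csv: each keyword argument becomes a DataFrame
# column (the names and values below are illustrative only):
#
#     pack_result_to_csv(filename='demo.csv', sort_columns=['score'],
#                        ascending=False, video_id=[1, 2], score=[0.9, 0.7])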


def pack_list_result_to_csv(filename, data, columns=None, sort_columns=None, filepath=config_.DATA_DIR_PATH, ascending=True):
    """
    Pack the data and save it to csv; the data is a list of dicts.
    :param filename: csv filename
    :param data: the data, type-list [{}, {}, ...]
    :param columns: column order
    :param sort_columns: columns to sort by, type-list, default None
    :param filepath: directory for the csv file, default config_.DATA_DIR_PATH
    :param ascending: whether to sort the given columns in ascending order, default True
    :return: None
    """
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    file = os.path.join(filepath, filename)
    df = pd.DataFrame(data=data)
    if sort_columns:
        df = df.sort_values(by=sort_columns, ascending=ascending)
    df.to_csv(file, index=False, columns=columns)


def predict():
    """Predict."""
    # Load and clean the prediction data
    x, video_ids = process_predict_data(config_.PREDICT_DATA_FILENAME)
    log_.info('predict data shape: x={}'.format(x.shape))
    # Load the trained model
    model = read_from_pickle(filename=config_.MODEL_FILENAME)
    # Predict
    y_ = model.predict(x)
    log_.info('predict finished!')
    # Normalize the results to [0, 100]
    normal_y_ = data_normalization(list(y_))
    log_.info('normalization finished!')
    # Sort by normal_y_ in descending order
    predict_data = []
    for i, video_id in enumerate(video_ids):
        data = {'video_id': video_id, 'normal_y_': normal_y_[i], 'y_': y_[i]}
        predict_data.append(data)
    predict_data_sorted = sorted(predict_data, key=lambda temp: temp['normal_y_'], reverse=True)
    # Following the sorted order, decrease from 100 by a fixed step (arithmetic
    # progression) and use that value as the rovScore
    predict_result = []
    redis_data = {}
    json_data = []
    video_id_list = []
    for j, item in enumerate(predict_data_sorted):
        video_id = int(item['video_id'])
        rov_score = 100 - j * config_.ROV_SCORE_D
        item['rov_score'] = rov_score
        predict_result.append(item)
        redis_data[video_id] = rov_score
        json_data.append({'videoId': video_id, 'rovScore': rov_score})
        video_id_list.append(video_id)
    # Pack the prediction results and save to csv
    predict_result_filename = 'predict.csv'
    pack_list_result_to_csv(filename=predict_result_filename,
                            data=predict_result,
                            columns=['video_id', 'rov_score', 'normal_y_', 'y_'],
                            sort_columns=['rov_score'],
                            ascending=False)
    # Upload to redis
    key_name = config_.RECALL_KEY_NAME_PREFIX + time.strftime('%Y%m%d')
    redis_helper = RedisHelper()
    redis_helper.add_data_with_zset(key_name=key_name, data=redis_data)
    log_.info('data to redis finished!')
    # Clear the modified-ROV video data
    redis_helper.del_keys(key_name=config_.UPDATE_ROV_KEY_NAME)
    # Notify the backend to update its data
    log_.info('json_data count = {}'.format(len(json_data)))
    result = request_post(request_url=config_.NOTIFY_BACKEND_UPDATE_ROV_SCORE_URL, request_data={'videos': json_data})
    if result['code'] == 0:
        log_.info('notify backend success!')
    else:
        log_.error('notify backend fail!')
    # Update the videos' width/height-ratio data
    if video_id_list:
        update_video_w_h_rate(video_ids=video_id_list,
                              key_name=config_.W_H_RATE_UP_1_VIDEO_LIST_KEY_NAME['rov_recall'])
        log_.info('update video w_h_rate to redis finished!')
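
# Worked example for the scoring loop in predict above: if config_.ROV_SCORE_D were
# 0.01 (an assumed value), rank j = 0, 1, 2, ... would map to rov_score = 100,
# 99.99, 99.98, ... via 100 - j * config_.ROV_SCORE_D.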


def predict_test():
    """Generate data for the test environment."""
    # Fetch the 40000 most recently published videos in the test environment
    sql = "SELECT id FROM wx_video ORDER BY id DESC LIMIT 40000;"
    mysql_helper = MysqlHelper()
    data = mysql_helper.get_data(sql=sql)
    video_ids = [video[0] for video in data]
    # Filter by video status
    filtered_videos = filter_video_status(video_ids)
    log_.info('filtered_videos count = {}'.format(len(filtered_videos)))
    # Generate random numbers in 0-100 as scores
    redis_data = {}
    json_data = []
    for video_id in filtered_videos:
        score = random.uniform(0, 100)
        redis_data[video_id] = score
        json_data.append({'videoId': video_id, 'rovScore': score})
    log_.info('json_data count = {}'.format(len(json_data)))
    # Upload to Redis
    redis_helper = RedisHelper()
    key_name = config_.RECALL_KEY_NAME_PREFIX + time.strftime('%Y%m%d')
    redis_helper.add_data_with_zset(key_name=key_name, data=redis_data)
    log_.info('test data to redis finished!')
    # Clear the modified-ROV video data
    redis_helper.del_keys(key_name=config_.UPDATE_ROV_KEY_NAME)
    # Notify the backend to update its data
    result = request_post(request_url=config_.NOTIFY_BACKEND_UPDATE_ROV_SCORE_URL, request_data={'videos': json_data})
    if result['code'] == 0:
        log_.info('notify backend success!')
    else:
        log_.error('notify backend fail!')
    # Update the videos' width/height-ratio data
    if filtered_videos:
        update_video_w_h_rate(video_ids=filtered_videos,
                              key_name=config_.W_H_RATE_UP_1_VIDEO_LIST_KEY_NAME['rov_recall'])
        log_.info('update video w_h_rate to redis finished!')


if __name__ == '__main__':
    log_.info('rov model train start...')
    train_start = time.time()
    train_filename = config_.TRAIN_DATA_FILENAME
    X, Y, videos, fea = process_data(filename=train_filename)
    log_.info('X_shape = {}, Y_shape = {}'.format(X.shape, Y.shape))
    train(X, Y, features=fea)
    train_end = time.time()
    log_.info('rov model train end, execute time = {}ms'.format((train_end - train_start) * 1000))

    log_.info('rov model predict start...')
    predict_start = time.time()
    if env in ['dev', 'test']:
        predict_test()
    elif env in ['pre', 'pro']:
        predict()
    else:
        log_.error('env error')
    predict_end = time.time()
    log_.info('rov model predict end, execute time = {}ms'.format((predict_end - predict_start) * 1000))