# ad_xgboost_predict_data_generate.py
# Generates prediction-time feature data for the ad XGBoost model:
# cross-joins user features with video features, then appends ad_status.
  1. import os
  2. import time
  3. import pandas as pd
  4. predict_data_dir = './data/predict_data'
  5. user_filename = 'user_feature.csv'
  6. video_filename = 'video_feature.csv'
  7. def read_csv_data(filepath):
  8. if os.path.exists(filepath):
  9. data = pd.read_csv(filepath, sep=',', engine='python', iterator=True)
  10. chunk_size = 1000000
  11. chunks = []
  12. loop = True
  13. while loop:
  14. try:
  15. chunk_data = data.get_chunk(chunk_size)
  16. chunks.append(chunk_data)
  17. except StopIteration:
  18. loop = False
  19. df = pd.concat(chunks, ignore_index=True)
  20. return df
  21. else:
  22. print("Don't have this file!")
  23. return None
  24. if __name__ == '__main__':
  25. st_time = time.time()
  26. # 1. 获取用户特征数据
  27. user_filepath = f"{predict_data_dir}/{user_filename}"
  28. user_df = read_csv_data(filepath=user_filepath)
  29. # 2. 获取视频特征数据
  30. video_filepath = f"{predict_data_dir}/{video_filename}"
  31. video_df = read_csv_data(filepath=video_filepath)
  32. # 3. 用户特征和视频特征进行拼接
  33. video_features = [
  34. 'videoid',
  35. 'video_preview_count_uv_30day',
  36. 'video_preview_count_pv_30day',
  37. 'video_view_count_uv_30day',
  38. 'video_view_count_pv_30day',
  39. 'video_play_count_uv_30day',
  40. 'video_play_count_pv_30day',
  41. 'video_share_count_uv_30day',
  42. 'video_share_count_pv_30day',
  43. 'video_return_count_30day',
  44. 'video_ctr_uv_30day',
  45. 'video_ctr_pv_30day',
  46. 'video_share_rate_uv_30day',
  47. 'video_share_rate_pv_30day',
  48. 'video_return_rate_30day',
  49. ]
  50. merge_df_list = []
  51. for ind, row in video_df.iterrows():
  52. merge_df_temp = user_df.copy()
  53. for feature in video_features:
  54. merge_df_temp[feature] = row[feature]
  55. merge_df_list.append(merge_df_temp)
  56. merge_df = pd.concat(merge_df_list, ignore_index=True)
  57. # 4. 拼接广告特征ad_status
  58. for ad_status in [0, 1]:
  59. res_df = merge_df.copy()
  60. res_df['ad_status'] = ad_status
  61. # 写入csv
  62. predict_data_dir = './data/predict_data'
  63. if not os.path.exists(predict_data_dir):
  64. os.makedirs(predict_data_dir)
  65. res_df.to_csv(f"{predict_data_dir}/predict_data_{ad_status}.csv", index=False)
  66. print(f"{time.time() - st_time}s")