ad_predict_user_data_process.py

import os.path
import time
import datetime
import pandas as pd
from odps import ODPS

# ODPS service configuration
odps_config = {
    'ENDPOINT': 'http://service.cn.maxcompute.aliyun.com/api',
    'ACCESSID': 'LTAIWYUujJAm7CbH',
    'ACCESSKEY': 'RfSjdiWwED1sGFlsjXv0DlfTnZTG1P',
}
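
# Columns read from the ODPS feature table: app type, user id (mid), the per-mid
# 30-day behavior counts (preview/view/play/share/return) and the derived 30-day
# share/return rates.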
features = [
    'apptype',
    'mid',
    'mid_preview_count_30day',
    'mid_view_count_30day',
    'mid_view_count_pv_30day',
    'mid_play_count_30day',
    'mid_play_count_pv_30day',
    'mid_share_count_30day',
    'mid_share_count_pv_30day',
    'mid_return_count_30day',
    'mid_share_rate_30day',
    'mid_return_rate_30day',
]


def get_feature_data(project, table, dt, app_type):
    """Fetch feature data from ODPS."""
    odps = ODPS(
        access_id=odps_config['ACCESSID'],
        secret_access_key=odps_config['ACCESSKEY'],
        project=project,
        endpoint=odps_config['ENDPOINT'],
    )
    feature_data = []
    sql = f"select * from {project}.{table} where dt={dt} and apptype={app_type}"
    with odps.execute_sql(sql).open_reader() as reader:
        for record in reader:
            # print(record)
            item = {}
            for feature_name in features:
                item[feature_name] = record[feature_name]
            feature_data.append(item)
    feature_df = pd.DataFrame(feature_data)
    return feature_df
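
# get_feature_data returns one row per record matched by the query and one column
# per name in `features`.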


def user_data_process(project, table, dt, app_type):
    """Daily feature processing."""
    print('step 1: get user feature data')
    feature_initial_df = get_feature_data(project=project, table=table, dt=dt, app_type=app_type)
    print(f"feature_initial_df shape: {feature_initial_df.shape}")
    print('step 2: process')
    feature_initial_df['apptype'] = feature_initial_df['apptype'].astype(int)
    feature_df = feature_initial_df.copy()
    # Fill missing values
    feature_df.fillna(0, inplace=True)
    # Correct column dtypes
    type_int_columns = [
        'mid_preview_count_30day',
        'mid_view_count_30day',
        'mid_view_count_pv_30day',
        'mid_play_count_30day',
        'mid_play_count_pv_30day',
        'mid_share_count_30day',
        'mid_share_count_pv_30day',
        'mid_return_count_30day',
    ]
    for column_name in type_int_columns:
        feature_df[column_name] = feature_df[column_name].astype(int)
    type_float_columns = [
        'mid_share_rate_30day',
        'mid_return_rate_30day',
    ]
    for column_name in type_float_columns:
        feature_df[column_name] = feature_df[column_name].astype(float)
    print(f"feature_df shape: {feature_df.shape}")
    print('step 3: add new user feature')
    # Add a default "new user" row (mid = '-1') built from the column means of existing users
    new_user_feature = {
        'apptype': app_type,
        'mid': '-1',
        'mid_preview_count_30day': int(feature_df['mid_preview_count_30day'].mean()),
        'mid_view_count_30day': int(feature_df['mid_view_count_30day'].mean()),
        'mid_view_count_pv_30day': int(feature_df['mid_view_count_pv_30day'].mean()),
        'mid_play_count_30day': int(feature_df['mid_play_count_30day'].mean()),
        'mid_play_count_pv_30day': int(feature_df['mid_play_count_pv_30day'].mean()),
        'mid_share_count_30day': int(feature_df['mid_share_count_30day'].mean()),
        'mid_share_count_pv_30day': int(feature_df['mid_share_count_pv_30day'].mean()),
        'mid_return_count_30day': int(feature_df['mid_return_count_30day'].mean()),
    }
    # Derive the rates from the mean counts; the +1 in the denominator guards against division by zero
    new_user_feature['mid_share_rate_30day'] = float(
        new_user_feature['mid_share_count_pv_30day'] / (new_user_feature['mid_play_count_pv_30day'] + 1))
    new_user_feature['mid_return_rate_30day'] = float(
        new_user_feature['mid_return_count_30day'] / (new_user_feature['mid_view_count_pv_30day'] + 1))
    new_user_feature_df = pd.DataFrame([new_user_feature])
    user_df = pd.concat([feature_df, new_user_feature_df])
    print(f"user_df shape: {user_df.shape}")
    print("step 4: to csv")
    # Write to CSV
    predict_data_dir = './data/predict_data'
    if not os.path.exists(predict_data_dir):
        os.makedirs(predict_data_dir)
    user_df.to_csv(f"{predict_data_dir}/user_feature.csv", index=False)


if __name__ == '__main__':
    st_time = time.time()
    project = 'loghubods'
    table = 'admodel_testset_mid'
    # dt = '20230725'
    now_date = datetime.datetime.today()
    dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
    user_data_process(project=project, table=table, dt=dt, app_type=0)
    print(time.time() - st_time)
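
# Intended to run as a daily job: it reads yesterday's dt partition from ODPS and writes
# ./data/predict_data/user_feature.csv (assumes the ODPS credentials above are valid and
# that yesterday's partition already exists).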