ad_feature_process.py

import os.path
import time

import pandas as pd
from odps import ODPS
from odps.df import DataFrame  # unused in the active code path
from utils import get_data_from_odps  # only used by the commented-out fallback below
from config import set_config

config_, env = set_config()
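
# The ODPS credentials are read from config_.ODPS_CONFIG. The exact shape of that
# dict lives in config.py and is not shown here; based on how it is used in
# get_feature_data() below, it is assumed to look roughly like this (the values
# are placeholders, not real settings):
#
#   ODPS_CONFIG = {
#       'ACCESSID': '<access id>',
#       'ACCESSKEY': '<access key>',
#       'ENDPOINT': '<maxcompute endpoint url>',
#   }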

features = [
    'apptype',
    'subsessionid',
    'mid',
    'videoid',
    'ad_mid',
    'share_videoid',
    'mid_preview_count_30day',
    'mid_view_count_30day',
    'mid_view_count_pv_30day',
    'mid_play_count_30day',
    'mid_play_count_pv_30day',
    'mid_share_count_30day',
    'mid_share_count_pv_30day',
    'mid_return_count_30day',
    'mid_share_rate_30day',
    'mid_return_rate_30day',
    'video_preview_count_uv_30day',
    'video_preview_count_pv_30day',
    'video_view_count_uv_30day',
    'video_view_count_pv_30day',
    'video_play_count_uv_30day',
    'video_play_count_pv_30day',
    'video_share_count_uv_30day',
    'video_share_count_pv_30day',
    'video_return_count_30day',
    'video_ctr_uv_30day',
    'video_ctr_pv_30day',
    'video_share_rate_uv_30day',
    'video_share_rate_pv_30day',
    'video_return_rate_30day',
]

# Columns kept for training; ad_status and share_status are derived in daily_data_process()
train_feature = [
    'mid_preview_count_30day',
    'mid_view_count_30day',
    'mid_view_count_pv_30day',
    'mid_play_count_30day',
    'mid_play_count_pv_30day',
    'mid_share_count_30day',
    'mid_share_count_pv_30day',
    'mid_return_count_30day',
    'mid_share_rate_30day',
    'mid_return_rate_30day',
    'video_preview_count_uv_30day',
    'video_preview_count_pv_30day',
    'video_view_count_uv_30day',
    'video_view_count_pv_30day',
    'video_play_count_uv_30day',
    'video_play_count_pv_30day',
    'video_share_count_uv_30day',
    'video_share_count_pv_30day',
    'video_return_count_30day',
    'video_ctr_uv_30day',
    'video_ctr_pv_30day',
    'video_share_rate_uv_30day',
    'video_share_rate_pv_30day',
    'video_return_rate_30day',
    'ad_status',
    'share_status',
]

def get_feature_data(project, table, features, dt):
    """Fetch feature data for one partition from ODPS."""
    # records = get_data_from_odps(date=dt, project=project, table=table)
    # feature_data = []
    # i = 0
    # for record in records:
    #     if i > 300000:
    #         break
    #     item = {}
    #     for feature_name in features:
    #         item[feature_name] = record[feature_name]
    #     feature_data.append(item)
    #     i += 1
    # feature_df = pd.DataFrame(feature_data)
    # return feature_df
    odps = ODPS(
        access_id=config_.ODPS_CONFIG['ACCESSID'],
        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
        project=project,
        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
    )
    feature_data = []
    # Read the whole partition; the apptype filter is applied again by app_type downstream.
    sql = f"select * from {project}.{table} where dt={dt} and apptype=0"
    with odps.execute_sql(sql).open_reader() as reader:
        for record in reader:
            # print(record)
            item = {}
            for feature_name in features:
                item[feature_name] = record[feature_name]
            feature_data.append(item)
    feature_df = pd.DataFrame(feature_data)
    return feature_df
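
# A hedged sketch, not part of the original pipeline: the commented-out block in
# get_feature_data() caps the read at 300k rows. The helper below shows one way to
# do that through the table tunnel instead of execute_sql. get_feature_data_capped
# is a hypothetical name, and the partition spec f"dt={dt}" is an assumption about
# how the table is partitioned.
def get_feature_data_capped(project, table, features, dt, limit=300000):
    """Read at most `limit` rows of one partition via the table tunnel (sketch)."""
    odps = ODPS(
        access_id=config_.ODPS_CONFIG['ACCESSID'],
        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
        project=project,
        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
    )
    t = odps.get_table(table)
    feature_data = []
    with t.open_reader(partition=f"dt={dt}") as reader:
        for i, record in enumerate(reader):
            if i >= limit:
                break
            feature_data.append({name: record[name] for name in features})
    # Note: unlike the SQL path, no apptype filter is applied here;
    # daily_data_process() still filters by app_type afterwards.
    return pd.DataFrame(feature_data)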

def daily_data_process(project, table, features, dt, app_type):
    """Daily feature processing: derive labels, clean types, and export the training set."""
    print('step 1: get feature data')
    feature_initial_df = get_feature_data(project=project, table=table, features=features, dt=dt)
    print(f"feature_initial_df shape: {feature_initial_df.shape}")

    print('step 2: process')
    feature_initial_df['apptype'] = feature_initial_df['apptype'].astype(int)
    feature_df = feature_initial_df[feature_initial_df['apptype'] == app_type].copy()
    # Add 'ad_status' flag for this request: 1 = ad shown, 0 = no ad
    feature_df['ad_status'] = feature_df.apply(func=lambda x: 1 if x['ad_mid'] == x['mid'] else 0, axis=1)
    feature_df['share_videoid'] = feature_df['share_videoid'].fillna(0)
    feature_df['share_videoid'] = feature_df['share_videoid'].astype(int)
    feature_df['videoid'] = feature_df['videoid'].astype(int)
    # Add 'share_status' flag: 1 = this video was shared, 0 = not shared
    feature_df['share_status'] = feature_df.apply(func=lambda x: 1 if x['share_videoid'] == x['videoid'] else 0, axis=1)
    # Fill remaining missing values
    feature_df.fillna(0, inplace=True)
    # Correct column dtypes
    type_int_columns = [
        'mid_preview_count_30day',
        'mid_view_count_30day',
        'mid_view_count_pv_30day',
        'mid_play_count_30day',
        'mid_play_count_pv_30day',
        'mid_share_count_30day',
        'mid_share_count_pv_30day',
        'mid_return_count_30day',
        'video_preview_count_uv_30day',
        'video_preview_count_pv_30day',
        'video_view_count_uv_30day',
        'video_view_count_pv_30day',
        'video_play_count_uv_30day',
        'video_play_count_pv_30day',
        'video_share_count_uv_30day',
        'video_share_count_pv_30day',
        'video_return_count_30day',
    ]
    for column_name in type_int_columns:
        feature_df[column_name] = feature_df[column_name].astype(int)
    type_float_columns = [
        'mid_share_rate_30day',
        'mid_return_rate_30day',
        'video_ctr_uv_30day',
        'video_ctr_pv_30day',
        'video_share_rate_uv_30day',
        'video_share_rate_pv_30day',
        'video_return_rate_30day',
    ]
    for column_name in type_float_columns:
        feature_df[column_name] = feature_df[column_name].astype(float)
    print(f"feature_df shape: {feature_df.shape}")

    # Keep only the columns needed for training
    print('step 3: get train_df')
    train_df = feature_df[train_feature]
    print(f"train_df shape: {train_df.shape}")

    # Write the day's training data to CSV
    train_data_dir = './data/train_data'
    if not os.path.exists(train_data_dir):
        os.makedirs(train_data_dir)
    train_df.to_csv(f"{train_data_dir}/{dt}.csv")
    return train_df
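
# A hedged sketch, not part of the original script: processing a span of days and
# concatenating the per-day results. process_date_range is a hypothetical helper;
# it just loops daily_data_process() over dt strings in the same YYYYMMDD format.
def process_date_range(project, table, features, start_dt, end_dt, app_type=0):
    """Run daily_data_process for every day in [start_dt, end_dt] and concatenate (sketch)."""
    dts = pd.date_range(start=start_dt, end=end_dt).strftime('%Y%m%d')
    daily_frames = [
        daily_data_process(project=project, table=table, features=features, dt=dt, app_type=app_type)
        for dt in dts
    ]
    return pd.concat(daily_frames, ignore_index=True)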

if __name__ == '__main__':
    st_time = time.time()
    project = 'loghubods'
    table = 'admodel_data_train'
    dt = '20230725'
    df = daily_data_process(project=project, table=table, features=features, dt=dt, app_type=0)
    print(df.shape)
    print(df.columns)
    # df.to_csv(f'./data/{dt}.csv', index=False)
    # get_feature_data(project=project, table=table, features=features, dt=dt)
    print(time.time() - st_time)