# process_data.py
  1. """
  2. process the data to satisfy the lightgbm
  3. """
  4. import sys
  5. import os
  6. import json
  7. from tqdm import tqdm
  8. import jieba.analyse
  9. sys.path.append(os.getcwd())
  10. from functions import generate_label_date, MysqlClient
  11. class DataProcessor(object):
  12. """
  13. Process the data to satisfy the lightGBM
  14. """
  15. def __init__(self, flag, c="useful"):
  16. self.client = MysqlClient()
  17. self.flag = flag
  18. self.c = c
  19. def generate_train_label(self, item, y_ori_data, cate):
  20. """
  21. 生成训练数据,用 np.array矩阵的方式返回,
  22. :return: x_train, 训练数据, y_train, 训练 label
  23. """
  24. video_id = item["video_id"]
  25. dt = item["dt"]
  26. useful_features = [
  27. "uid",
  28. "type",
  29. "channel",
  30. "fans",
  31. "view_count_user_30days",
  32. "share_count_user_30days",
  33. "return_count_user_30days",
  34. "rov_user",
  35. "str_user",
  36. "out_user_id",
  37. "mode",
  38. "out_play_cnt",
  39. "out_like_cnt",
  40. "out_share_cnt",
  41. "out_collection_cnt",
  42. ]
  43. spider_features = [
  44. "channel",
  45. "view_count_user_30days",
  46. "share_count_user_30days",
  47. "return_count_user_30days",
  48. "rov_user",
  49. "str_user",
  50. "out_user_id",
  51. "mode",
  52. "out_play_cnt",
  53. "out_like_cnt",
  54. "out_share_cnt"
  55. ]
  56. user_features = [
  57. "uid",
  58. "channel",
  59. "fans",
  60. "view_count_user_30days",
  61. "share_count_user_30days",
  62. "return_count_user_30days",
  63. "rov_user",
  64. "str_user"
  65. ]
  66. match self.c:
  67. case "useful":
  68. item_features = [item[i] for i in useful_features]
  69. case "user":
  70. if item['type'] == "userupload":
  71. item_features = [item[i] for i in user_features]
  72. else:
  73. return None, None
  74. case "spider":
  75. if item['type'] == "spider":
  76. item_features = [item[i] for i in spider_features]
  77. else:
  78. return None, None
  79. keywords_textrank = self.title_processor(video_id)
  80. if keywords_textrank:
  81. for i in range(3):
  82. try:
  83. item_features.append(keywords_textrank[i])
  84. except:
  85. item_features.append(None)
  86. else:
  87. item_features.append(None)
  88. item_features.append(None)
  89. item_features.append(None)
  90. label_dt = generate_label_date(dt)
  91. label_obj = y_ori_data.get(label_dt, {}).get(video_id)
  92. if label_obj:
  93. label = int(label_obj[cate]) if label_obj[cate] else 0
  94. else:
  95. label = 0
  96. return label, item_features
  97. def title_processor(self, video_id):
  98. """
  99. 通过 video_id 去获取title, 然后通过 title 再分词,把关键词作为 feature
  100. :param video_id: the video id
  101. :return: tag_list [tag, tag, tag, tag......]
  102. """
  103. sql = f"""SELECT title from wx_video where id = {video_id};"""
  104. try:
  105. title = self.client.select(sql)[0][0]
  106. keywords_textrank = jieba.analyse.textrank(title, topK=3)
  107. return list(keywords_textrank)
  108. except Exception as e:
  109. print(video_id, "\t", e)
  110. return []
  111. def producer(self, dt):
  112. """
  113. 生成数据
  114. :return:none
  115. """
  116. if self.flag == "train":
  117. x_path = "data/train_data/train_2024010100_2024031523.json"
  118. y_path = "data/train_data/daily-label-20240101-20240325.json"
  119. elif self.flag == "predict":
  120. x_path = "data/pred_data/pred_202403{}00_202403{}23.json".format(dt, dt)
  121. y_path = "data/train_data/daily-label-20240101-20240325.json"
  122. else:
  123. return
  124. with open(x_path) as f:
  125. x_data = json.loads(f.read())
  126. with open(y_path) as f:
  127. y_data = json.loads(f.read())
  128. cate_list = ["total_return"]
  129. for c in cate_list:
  130. x_list = []
  131. y_list = []
  132. for video_obj in tqdm(x_data):
  133. our_label, features = self.generate_train_label(video_obj, y_data, c)
  134. if features:
  135. x_list.append(features)
  136. y_list.append(our_label)
  137. with open("data/produce_data/x_data_{}_{}_{}_{}.json".format(c, self.flag, dt, self.c), "w") as f1:
  138. f1.write(json.dumps(x_list, ensure_ascii=False))
  139. with open("data/produce_data/y_data_{}_{}_{}_{}.json".format(c, self.flag, dt, self.c), "w") as f2:
  140. f2.write(json.dumps(y_list, ensure_ascii=False))
  141. if __name__ == "__main__":
  142. flag = int(input("please input method train or predict:\n "))
  143. if flag == 1:
  144. t = "train"
  145. D = DataProcessor(flag=t, c="user")
  146. D.producer(dt="whole")
  147. else:
  148. t = "predict"
  149. D = DataProcessor(flag=t, c="user")
  150. for d in range(16, 22):
  151. D.producer(d)