# process_data.py
  1. """
  2. process the data to satisfy the lightgbm
  3. """
  4. import sys
  5. import os
  6. import json
  7. from tqdm import tqdm
  8. import jieba.analyse
  9. sys.path.append(os.getcwd())
  10. from functions import generate_label_date, MysqlClient
  11. class DataProcessor(object):
  12. """
  13. Process the data to satisfy the lightGBM
  14. """
  15. def __init__(self, flag, c="useful"):
  16. self.client = MysqlClient()
  17. self.flag = flag
  18. self.c = c
  19. def generate_train_label(self, item, y_ori_data, cate):
  20. """
  21. 生成训练数据,用 np.array矩阵的方式返回,
  22. :return: x_train, 训练数据, y_train, 训练 label
  23. """
  24. video_id = item["video_id"]
  25. dt = item["dt"]
  26. useful_features = [
  27. "uid",
  28. "type",
  29. "channel",
  30. "fans",
  31. "view_count_user_30days",
  32. "share_count_user_30days",
  33. "return_count_user_30days",
  34. "rov_user",
  35. "str_user",
  36. "out_user_id",
  37. "mode",
  38. "out_play_cnt",
  39. "out_like_cnt",
  40. "out_share_cnt",
  41. "out_collection_cnt",
  42. ]
  43. spider_features = [
  44. "channel",
  45. "out_user_id",
  46. "mode",
  47. "out_play_cnt",
  48. "out_like_cnt",
  49. "out_share_cnt"
  50. ]
  51. user_features = [
  52. "uid",
  53. "channel",
  54. "fans",
  55. "view_count_user_30days",
  56. "share_count_user_30days",
  57. "return_count_user_30days",
  58. "rov_user",
  59. "str_user"
  60. ]
  61. match self.c:
  62. case "useful":
  63. item_features = [item[i] for i in useful_features]
  64. case "user":
  65. if item['type'] == "userupload":
  66. item_features = [item[i] for i in user_features]
  67. else:
  68. return None, None
  69. case "spider":
  70. if item['type'] == "spider":
  71. item_features = [item[i] for i in spider_features]
  72. else:
  73. return None, None
  74. keywords_textrank = self.title_processor(video_id)
  75. if keywords_textrank:
  76. for i in range(3):
  77. try:
  78. item_features.append(keywords_textrank[i])
  79. except:
  80. item_features.append(None)
  81. else:
  82. item_features.append(None)
  83. item_features.append(None)
  84. item_features.append(None)
  85. label_dt = generate_label_date(dt)
  86. label_obj = y_ori_data.get(label_dt, {}).get(video_id)
  87. if label_obj:
  88. label = int(label_obj[cate]) if label_obj[cate] else 0
  89. else:
  90. label = 0
  91. return label, item_features
  92. def title_processor(self, video_id):
  93. """
  94. 通过 video_id 去获取title, 然后通过 title 再分词,把关键词作为 feature
  95. :param video_id: the video id
  96. :return: tag_list [tag, tag, tag, tag......]
  97. """
  98. sql = f"""SELECT title from wx_video where id = {video_id};"""
  99. try:
  100. title = self.client.select(sql)[0][0]
  101. keywords_textrank = jieba.analyse.textrank(title, topK=3)
  102. return list(keywords_textrank)
  103. except Exception as e:
  104. print(video_id, "\t", e)
  105. return []
  106. def producer(self, dt):
  107. """
  108. 生成数据
  109. :return:none
  110. """
  111. if self.flag == "train":
  112. x_path = "data/train_data/train_2024010100_2024031523.json"
  113. y_path = "data/train_data/daily-label-20240101-20240325.json"
  114. elif self.flag == "predict":
  115. x_path = "data/pred_data/pred_202403{}00_202403{}23.json".format(dt, dt)
  116. y_path = "data/train_data/daily-label-20240101-20240325.json"
  117. else:
  118. return
  119. with open(x_path) as f:
  120. x_data = json.loads(f.read())
  121. with open(y_path) as f:
  122. y_data = json.loads(f.read())
  123. cate_list = ["total_return"]
  124. for c in cate_list:
  125. x_list = []
  126. y_list = []
  127. for video_obj in tqdm(x_data):
  128. our_label, features = self.generate_train_label(video_obj, y_data, c)
  129. if features:
  130. x_list.append(features)
  131. y_list.append(our_label)
  132. with open("data/produce_data/x_data_{}_{}_{}_{}.json".format(c, self.flag, dt, self.c), "w") as f1:
  133. f1.write(json.dumps(x_list, ensure_ascii=False))
  134. with open("data/produce_data/y_data_{}_{}_{}_{}.json".format(c, self.flag, dt, self.c), "w") as f2:
  135. f2.write(json.dumps(y_list, ensure_ascii=False))
  136. if __name__ == "__main__":
  137. flag = int(input("please input method train or predict:\n "))
  138. if flag == 1:
  139. t = "train"
  140. D = DataProcessor(flag=t, c="spider")
  141. D.producer(dt="whole")
  142. else:
  143. t = "predict"
  144. D = DataProcessor(flag=t, c="spider")
  145. for d in range(16, 22):
  146. D.producer(d)