# prepare.py
# (removed pasted-export artifacts: filename/size header and a run of concatenated line numbers)
  1. import json
  2. from collections import defaultdict
  3. from pathlib import Path
  4. from examples.piaoquan_demand.data_query_tools import get_rov_by_merge_leve2_and_video_ids
  5. from examples.piaoquan_demand.db_manager import DatabaseManager
  6. from examples.piaoquan_demand.models import TopicPatternElement, TopicPatternExecution
# Module-level database manager shared by prepare(); a fresh session is
# obtained from it per call and closed in the caller's finally block.
db = DatabaseManager()
  8. def _safe_float(value):
  9. if value is None:
  10. return 0.0
  11. try:
  12. return float(value)
  13. except (TypeError, ValueError):
  14. return 0.0
  15. def _build_category_scores(name_scores, name_paths, name_post_ids):
  16. """
  17. 计算分类路径节点权重:
  18. - 一个 name 的 score 贡献给其路径上的每个节点
  19. - 同一个 name 多条路径时,每条路径都累加
  20. """
  21. node_scores = defaultdict(float)
  22. node_post_ids = defaultdict(set)
  23. for name, score in name_scores.items():
  24. paths = name_paths.get(name, set())
  25. post_ids = name_post_ids.get(name, set())
  26. for category_path in paths:
  27. if not category_path:
  28. continue
  29. nodes = [segment.strip() for segment in category_path.split(">") if segment.strip()]
  30. for idx in range(len(nodes)):
  31. prefix = ">".join(nodes[: idx + 1])
  32. node_scores[prefix] += score
  33. if post_ids:
  34. node_post_ids[prefix].update(post_ids)
  35. return node_scores, node_post_ids
  36. def _write_json(path, payload):
  37. with open(path, "w", encoding="utf-8") as f:
  38. json.dump(payload, f, ensure_ascii=False, indent=2)
  39. def prepare(execution_id):
  40. session = db.get_session()
  41. try:
  42. execution = session.query(TopicPatternExecution).filter(
  43. TopicPatternExecution.id == execution_id
  44. ).first()
  45. if not execution:
  46. raise ValueError(f"execution_id 不存在: {execution_id}")
  47. merge_leve2 = execution.merge_leve2
  48. rows = session.query(TopicPatternElement).filter(
  49. TopicPatternElement.execution_id == execution_id
  50. ).all()
  51. if not rows:
  52. return {"message": "没有可处理的数据", "execution_id": execution_id}
  53. # 1) 去重 post_id 拉取 ROV
  54. all_post_ids = sorted({r.post_id for r in rows if r.post_id})
  55. rov_by_post_id = get_rov_by_merge_leve2_and_video_ids(merge_leve2, all_post_ids) if all_post_ids else {}
  56. # 2) 按 element_type 分组,计算 name 的平均 ROV 分
  57. grouped = {
  58. "实质": {
  59. "name_post_ids": defaultdict(set),
  60. "name_paths": defaultdict(set),
  61. },
  62. "形式": {
  63. "name_post_ids": defaultdict(set),
  64. "name_paths": defaultdict(set),
  65. },
  66. "意图": {
  67. "name_post_ids": defaultdict(set),
  68. "name_paths": defaultdict(set),
  69. },
  70. }
  71. for r in rows:
  72. element_type = (r.element_type or "").strip()
  73. if element_type not in grouped:
  74. continue
  75. name = (r.name or "").strip()
  76. if not name:
  77. continue
  78. if r.post_id:
  79. grouped[element_type]["name_post_ids"][name].add(r.post_id)
  80. if r.category_path:
  81. grouped[element_type]["name_paths"][name].add(r.category_path.strip())
  82. output_dir = Path(__file__).parent / "data" / str(execution_id)
  83. output_dir.mkdir(parents=True, exist_ok=True)
  84. summary = {"execution_id": execution_id, "merge_leve2": merge_leve2, "files": {}}
  85. for element_type, data in grouped.items():
  86. name_post_ids = data["name_post_ids"]
  87. name_paths = data["name_paths"]
  88. name_scores = {}
  89. for name, post_ids in name_post_ids.items():
  90. rovs = [_safe_float(rov_by_post_id.get(pid, 0.0)) for pid in post_ids]
  91. score = sum(rovs) / len(rovs) if rovs else 0.0
  92. name_scores[name] = score
  93. raw_elements = []
  94. for name, score in name_scores.items():
  95. post_ids_set = name_post_ids.get(name, set())
  96. raw_elements.append(
  97. {
  98. "name": name,
  99. "score": round(score, 6),
  100. # 不在结果文件里输出帖子 ID 明细,避免体积过大/泄露。
  101. "post_ids_count": len(post_ids_set),
  102. "category_paths": sorted(list(name_paths.get(name, set()))),
  103. }
  104. )
  105. # 通过(score, name)确保排序稳定,进而生成可重复的 id。
  106. element_payload = sorted(
  107. raw_elements,
  108. key=lambda x: (-x["score"], x["name"]),
  109. )
  110. # 3) 计算分类路径节点权重(节点分 = 覆盖的 name score 求和)
  111. category_scores, category_post_ids = _build_category_scores(
  112. name_scores, name_paths, name_post_ids
  113. )
  114. category_payload = sorted(
  115. [
  116. {
  117. "category_path": path,
  118. "category": path.split(">")[-1].strip() if path else "",
  119. "score": round(score, 6),
  120. "post_ids_count": len(category_post_ids.get(path, set())),
  121. }
  122. for path, score in category_scores.items()
  123. ],
  124. key=lambda x: x["score"],
  125. reverse=True,
  126. )
  127. element_file = output_dir / f"{element_type}_元素.json"
  128. category_file = output_dir / f"{element_type}_分类.json"
  129. _write_json(element_file, element_payload)
  130. _write_json(category_file, category_payload)
  131. summary["files"][f"{element_type}_元素"] = str(element_file)
  132. summary["files"][f"{element_type}_分类"] = str(category_file)
  133. return summary
  134. finally:
  135. session.close()
  136. if __name__ == '__main__':
  137. prepare(29)