#!/usr/bin/env python3
"""
生成推导可视化数据。
输入参数:account_name, post_id, log_id
- 从 input/{account_name}/原始数据/解构内容/{post_id}.json 解析选题点列表
- 从 output/{account_name}/推导日志/{post_id}/{log_id}/ 读取推导与评估 JSON,生成:
  1. output/{account_name}/整体推导结果/{post_id}.json
  2. output/{account_name}/整体推导路径可视化/{post_id}.json
"""
import argparse
import json
import re
from pathlib import Path
from typing import Any
  15. def _collect_dimension_names(point_data: dict) -> dict[str, str]:
  16. """从点的 实质/形式/意图 中收集 名称 -> dimension。"""
  17. name_to_dim = {}
  18. if "实质" in point_data and point_data["实质"]:
  19. for key in ("具体元素", "具象概念", "抽象概念"):
  20. for item in (point_data["实质"].get(key) or []):
  21. n = item.get("名称")
  22. if n:
  23. name_to_dim[n] = "实质"
  24. if "形式" in point_data and point_data["形式"]:
  25. for key in ("具体元素形式", "具象概念形式", "整体形式"):
  26. for item in (point_data["形式"].get(key) or []):
  27. n = item.get("名称")
  28. if n:
  29. name_to_dim[n] = "形式"
  30. if point_data.get("意图"):
  31. for item in point_data["意图"]:
  32. n = item.get("名称")
  33. if n:
  34. name_to_dim[n] = "意图"
  35. return name_to_dim
  36. def parse_topic_points_from_deconstruct(deconstruct_path: Path) -> list[dict[str, Any]]:
  37. """
  38. 从 input/{account_name}/解构内容/{post_id}.json 解析选题点列表。
  39. 选题点来自分词结果中的「词」,字段:name, point, dimension, root_source, root_sources_desc。
  40. """
  41. if not deconstruct_path.exists():
  42. raise FileNotFoundError(f"解构内容文件不存在: {deconstruct_path}")
  43. with open(deconstruct_path, "r", encoding="utf-8") as f:
  44. data = json.load(f)
  45. result = []
  46. for point_type in ("灵感点", "目的点", "关键点"):
  47. for point in data.get(point_type) or []:
  48. root_source = point.get("点", "")
  49. root_sources_desc = point.get("点描述", "")
  50. name_to_dim = _collect_dimension_names(point)
  51. for word_item in point.get("分词结果") or []:
  52. name = word_item.get("词", "").strip()
  53. if not name:
  54. continue
  55. dimension = name_to_dim.get(name, "实质")
  56. result.append({
  57. "name": name,
  58. "point": point_type,
  59. "dimension": dimension,
  60. "root_source": root_source,
  61. "root_sources_desc": root_sources_desc,
  62. })
  63. return result
  64. def _topic_point_key(t: dict) -> tuple:
  65. return (t["name"], t["point"], t["dimension"])
  66. def load_derivation_logs(log_dir: Path) -> tuple[list[dict], list[dict]]:
  67. """
  68. 从 output/{account_name}/推导日志/{post_id}/{log_id}/ 读取所有 {轮次}_推导.json 与 {轮次}_评估.json。
  69. 返回 (推导列表按轮次序, 评估列表按轮次序)。
  70. """
  71. if not log_dir.is_dir():
  72. raise FileNotFoundError(f"推导日志目录不存在: {log_dir}")
  73. derivation_by_round = {}
  74. eval_by_round = {}
  75. for p in log_dir.glob("*.json"):
  76. base = p.stem
  77. m = re.match(r"^(\d+)_(推导|评估)$", base)
  78. if not m:
  79. continue
  80. round_num = int(m.group(1))
  81. with open(p, "r", encoding="utf-8") as f:
  82. content = json.load(f)
  83. if m.group(2) == "推导":
  84. derivation_by_round[round_num] = content
  85. else:
  86. eval_by_round[round_num] = content
  87. rounds = sorted(set(derivation_by_round) | set(eval_by_round))
  88. derivations = [derivation_by_round[r] for r in rounds if r in derivation_by_round]
  89. evals = [eval_by_round[r] for r in rounds if r in eval_by_round]
  90. return derivations, evals
  91. def build_derivation_result(
  92. topic_points: list[dict],
  93. derivations: list[dict],
  94. evals: list[dict],
  95. ) -> list[dict]:
  96. """
  97. 生成整体推导结果:每轮 轮次、推导成功的选题点、未推导成功的选题点、本次新推导成功的选题点。
  98. 选题点用 topic_points 中的完整信息;按 name 判定是否被推导(评估中的 match_post_point)。
  99. """
  100. all_keys = {_topic_point_key(t) for t in topic_points}
  101. topic_by_key = {_topic_point_key(t): t for t in topic_points}
  102. result = []
  103. derived_names_so_far: set[str] = set()
  104. for i, (derivation, eval_data) in enumerate(zip(derivations, evals)):
  105. round_num = derivation.get("round", i + 1)
  106. eval_results = eval_data.get("eval_results") or []
  107. matched_post_points = set()
  108. for er in eval_results:
  109. # 新格式: is_matched;旧格式: match_result == "匹配"
  110. if not (er.get("is_matched") is True or er.get("match_result") == "匹配"):
  111. continue
  112. mp = er.get("matched_post_point") or er.get("matched_post_topic") or er.get("match_post_point") or ""
  113. if mp and str(mp).strip():
  114. matched_post_points.add(str(mp).strip())
  115. new_derived_names = matched_post_points - derived_names_so_far
  116. derived_names_so_far |= matched_post_points
  117. # 推导成功的选题点:name 在 derived_names_so_far 中的选题点(每 name 取一条,与 topic_points 顺序一致)
  118. derived_keys = {k for k in all_keys if topic_by_key[k]["name"] in derived_names_so_far}
  119. new_derived_keys = {k for k in all_keys if topic_by_key[k]["name"] in new_derived_names}
  120. not_derived_keys = all_keys - derived_keys
  121. derived_list = [dict(topic_by_key[k]) for k in sorted(derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))]
  122. new_list = [dict(topic_by_key[k]) for k in sorted(new_derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))]
  123. not_derived_list = [dict(topic_by_key[k]) for k in sorted(not_derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))]
  124. result.append({
  125. "轮次": round_num,
  126. "推导成功的选题点": derived_list,
  127. "未推导成功的选题点": not_derived_list,
  128. "本次新推导成功的选题点": new_list,
  129. })
  130. return result
  131. def _tree_node_display_name(raw: str) -> str:
  132. """人设节点可能是 a.b.c 路径形式,实际需要的是最后一段节点名 c。"""
  133. s = (raw or "").strip()
  134. if "." in s:
  135. return s.rsplit(".", 1)[-1].strip() or s
  136. return s
  137. def _to_tree_node(name: str, extra: dict | None = None) -> dict:
  138. d = {"name": name}
  139. if extra:
  140. d.update(extra)
  141. return d
  142. def _to_pattern_node(pattern_name: str) -> dict:
  143. """将 pattern 字符串转为 input_pattern_nodes 的一项(简化版)。"""
  144. items = [x.strip() for x in pattern_name.replace("+", " ").split() if x.strip()]
  145. return {
  146. "items": [{"name": x, "point": "关键点", "dimension": "形式", "type": "标签"} for x in items],
  147. "match_items": items,
  148. }
def build_visualize_edges(
    derivations: list[dict],
    evals: list[dict],
    topic_points: list[dict],
) -> tuple[list[dict], list[dict]]:
    """
    Build node_list (every post topic point that passed evaluation) and
    edge_list (only the derivation paths that passed evaluation).

    Rounds are processed in ascending order so that each output node appears
    in the output_nodes of at most one edge, and the earliest round's data is
    the one kept.
    """
    # Sort ascending by round so earlier rounds claim output nodes first.
    derivations = sorted(derivations, key=lambda d: d.get("round", 0))
    evals = sorted(evals, key=lambda e: e.get("round", 0))
    # First occurrence of each name in topic_points wins.
    topic_by_name = {}
    for t in topic_points:
        name = t["name"]
        if name not in topic_by_name:
            topic_by_name[name] = t
    # (round, id, derivation_output_point) -> matched post point.
    # New eval format carries is_matched + id; old format has match_result and no id (-1).
    match_by_round_id_output: dict[tuple[int, int, str], str] = {}
    # (round, id) -> all derivation_output_point values that matched for that
    # derivation entry; used for the edge detail 「待比对的推导选题点」.
    round_id_to_output_points: dict[tuple[int, int], list[str]] = {}
    for round_idx, eval_data in enumerate(evals):
        round_num = eval_data.get("round", round_idx + 1)
        for er in eval_data.get("eval_results") or []:
            if not (er.get("is_matched") is True or er.get("match_result") == "匹配"):
                continue
            out_point = (er.get("derivation_output_point") or "").strip()
            # -1 is the sentinel id for old-format entries without an id.
            dr_id = er.get("id") if er.get("id") is not None else -1
            key = (round_num, dr_id)
            if key not in round_id_to_output_points:
                round_id_to_output_points[key] = []
            if out_point:
                round_id_to_output_points[key].append(out_point)
            mp = er.get("matched_post_point") or er.get("matched_post_topic") or er.get("match_post_point") or ""
            if out_point and mp and str(mp).strip():
                mp = str(mp).strip()
                if dr_id != -1:
                    match_by_round_id_output[(round_num, dr_id, out_point)] = mp
                else:
                    match_by_round_id_output[(round_num, -1, out_point)] = mp
    node_list = []
    seen_nodes = set()
    edge_list = []
    # NOTE(review): level_by_name is written but never read in this function —
    # presumably a leftover or used by a caller-side convention; confirm.
    level_by_name = {}
    # Nodes already used as an edge's output; keeps each output node on one edge only.
    output_nodes_seen: set[str] = set()
    for round_idx, derivation in enumerate(derivations):
        round_num = derivation.get("round", round_idx + 1)
        for dr in derivation.get("derivation_results") or []:
            dr_id = dr.get("id")
            output_list = dr.get("output") or []
            matched_outputs = []
            for out_item in output_list:
                # Look up the matched post point for this output (new vs old format key).
                if dr_id is not None:
                    mp = match_by_round_id_output.get((round_num, dr_id, out_item))
                else:
                    mp = match_by_round_id_output.get((round_num, -1, out_item))
                if not mp:
                    continue
                matched_outputs.append(mp)
                if mp not in seen_nodes:
                    seen_nodes.add(mp)
                    # Fall back to a stub node when the matched name is not a known topic point.
                    node = dict(topic_by_name.get(mp, {"name": mp, "point": "", "dimension": "", "root_source": "", "root_sources_desc": ""}))
                    node["level"] = round_num
                    if "original_word" not in node:
                        node["original_word"] = node.get("name", mp)
                    node["derivation_type"] = dr.get("method", "")
                    level_by_name[mp] = round_num
                    node_list.append(node)
            if not matched_outputs:
                continue
            # Keep only nodes not yet claimed by an earlier edge's output_nodes.
            output_names_this_edge = [x for x in matched_outputs if x not in output_nodes_seen]
            if not output_names_this_edge:
                continue
            output_nodes_seen.update(output_names_this_edge)
            input_data = dr.get("input") or {}
            derived_nodes = input_data.get("derived_nodes") or []
            tree_nodes = input_data.get("tree_nodes") or []
            patterns = input_data.get("patterns") or []
            input_post_nodes = [{"name": x} for x in derived_nodes]
            input_tree_nodes = [_to_tree_node(_tree_node_display_name(x)) for x in tree_nodes]
            # patterns may be plain strings (converted) or already-shaped dicts (passed through).
            if patterns and isinstance(patterns[0], str):
                input_pattern_nodes = [_to_pattern_node(p) for p in patterns]
            elif patterns and isinstance(patterns[0], dict):
                input_pattern_nodes = patterns
            else:
                input_pattern_nodes = []
            output_nodes = [{"name": x} for x in output_names_this_edge]
            detail = {
                "reason": dr.get("reason", ""),
                "评估结果": "匹配成功",
            }
            key_dr = (round_num, dr_id if dr_id is not None else -1)
            detail["待比对的推导选题点"] = round_id_to_output_points.get(key_dr, [])
            if dr.get("tools"):
                detail["tools"] = dr["tools"]
            edge_list.append({
                "name": dr.get("method", "") or f"推导-{round_num}",
                "input_post_nodes": input_post_nodes,
                "input_tree_nodes": input_tree_nodes,
                "input_pattern_nodes": input_pattern_nodes,
                "output_nodes": output_nodes,
                "detail": detail,
            })
    return node_list, edge_list
  254. def generate_visualize_data(account_name: str, post_id: str, log_id: str, base_dir: Path | None = None) -> None:
  255. """
  256. 主流程:读取解构内容与推导日志,生成整体推导结果与整体推导路径可视化两个 JSON。
  257. """
  258. if base_dir is None:
  259. base_dir = Path(__file__).resolve().parent
  260. input_dir = base_dir / "input" / account_name / "原始数据" / "解构内容"
  261. log_dir = base_dir / "output" / account_name / "推导日志" / post_id / log_id
  262. result_dir = base_dir / "output" / account_name / "整体推导结果"
  263. visualize_dir = base_dir / "output" / account_name / "整体推导路径可视化"
  264. deconstruct_path = input_dir / f"{post_id}.json"
  265. topic_points = parse_topic_points_from_deconstruct(deconstruct_path)
  266. derivations, evals = load_derivation_logs(log_dir)
  267. if not derivations or not evals:
  268. raise ValueError(f"推导或评估数据为空: {log_dir}")
  269. # 2.1 整体推导结果
  270. derivation_result = build_derivation_result(topic_points, derivations, evals)
  271. result_dir.mkdir(parents=True, exist_ok=True)
  272. result_path = result_dir / f"{post_id}.json"
  273. with open(result_path, "w", encoding="utf-8") as f:
  274. json.dump(derivation_result, f, ensure_ascii=False, indent=4)
  275. print(f"已写入整体推导结果: {result_path}")
  276. # 2.2 整体推导路径可视化
  277. node_list, edge_list = build_visualize_edges(derivations, evals, topic_points)
  278. visualize_path = visualize_dir / f"{post_id}.json"
  279. visualize_dir.mkdir(parents=True, exist_ok=True)
  280. with open(visualize_path, "w", encoding="utf-8") as f:
  281. json.dump({"node_list": node_list, "edge_list": edge_list}, f, ensure_ascii=False, indent=4)
  282. print(f"已写入整体推导路径可视化: {visualize_path}")
  283. def main(account_name, post_id, log_id):
  284. # parser = argparse.ArgumentParser(description="生成推导可视化数据")
  285. # parser.add_argument("account_name", help="账号名,如 家有大志")
  286. # parser.add_argument("post_id", help="帖子 ID")
  287. # parser.add_argument("log_id", help="推导日志 ID,如 20260303204232")
  288. # parser.add_argument("--base-dir", type=Path, default=None, help="项目根目录,默认为本脚本所在目录")
  289. # args = parser.parse_args()
  290. generate_visualize_data(account_name=account_name, post_id=post_id, log_id=log_id)
if __name__ == "__main__":
    # Hard-coded sample identifiers for local/manual runs; must match data
    # that actually exists under input/ and output/ on disk.
    account_name = "家有大志"
    post_id = "68fb6a5c000000000302e5de"
    log_id = "20260305102218"
    main(account_name, post_id, log_id)