# generate_visualize_data.py
  1. #!/usr/bin/env python3
  2. """
  3. 生成推导可视化数据。
  4. 输入参数:account_name, post_id, log_id
  5. - 从 input/{account_name}/原始数据/解构内容/{post_id}.json 解析选题点列表
  6. - 从 output/{account_name}/推导日志/{post_id}/{log_id}/ 读取推导与评估 JSON,生成:
  7. 1. output/{account_name}/整体推导结果/{post_id}.json
  8. 2. output/{account_name}/整体推导路径可视化/{post_id}.json
  9. """
  10. import argparse
  11. import json
  12. import re
  13. from pathlib import Path
  14. from typing import Any
  15. def _collect_dimension_names(point_data: dict) -> dict[str, str]:
  16. """从点的 实质/形式/意图 中收集 名称 -> dimension。"""
  17. name_to_dim = {}
  18. if "实质" in point_data and point_data["实质"]:
  19. for key in ("具体元素", "具象概念", "抽象概念"):
  20. for item in (point_data["实质"].get(key) or []):
  21. n = item.get("名称")
  22. if n:
  23. name_to_dim[n] = "实质"
  24. if "形式" in point_data and point_data["形式"]:
  25. for key in ("具体元素形式", "具象概念形式", "整体形式"):
  26. for item in (point_data["形式"].get(key) or []):
  27. n = item.get("名称")
  28. if n:
  29. name_to_dim[n] = "形式"
  30. if point_data.get("意图"):
  31. for item in point_data["意图"]:
  32. n = item.get("名称")
  33. if n:
  34. name_to_dim[n] = "意图"
  35. return name_to_dim
  36. def parse_topic_points_from_deconstruct(deconstruct_path: Path) -> list[dict[str, Any]]:
  37. """
  38. 从 input/{account_name}/解构内容/{post_id}.json 解析选题点列表。
  39. 选题点来自分词结果中的「词」,字段:name, point, dimension, root_source, root_sources_desc。
  40. """
  41. if not deconstruct_path.exists():
  42. raise FileNotFoundError(f"解构内容文件不存在: {deconstruct_path}")
  43. with open(deconstruct_path, "r", encoding="utf-8") as f:
  44. data = json.load(f)
  45. result = []
  46. for point_type in ("灵感点", "目的点", "关键点"):
  47. for point in data.get(point_type) or []:
  48. root_source = point.get("点", "")
  49. root_sources_desc = point.get("点描述", "")
  50. name_to_dim = _collect_dimension_names(point)
  51. for word_item in point.get("分词结果") or []:
  52. name = word_item.get("词", "").strip()
  53. if not name:
  54. continue
  55. dimension = name_to_dim.get(name, "实质")
  56. result.append({
  57. "name": name,
  58. "point": point_type,
  59. "dimension": dimension,
  60. "root_source": root_source,
  61. "root_sources_desc": root_sources_desc,
  62. })
  63. return result
  64. def _topic_point_key(t: dict) -> tuple:
  65. return (t["name"], t["point"], t["dimension"])
  66. def load_derivation_logs(log_dir: Path) -> tuple[list[dict], list[dict]]:
  67. """
  68. 从 output/{account_name}/推导日志/{post_id}/{log_id}/ 读取所有 {轮次}_推导.json 与 {轮次}_评估.json。
  69. 返回 (推导列表按轮次序, 评估列表按轮次序)。
  70. """
  71. if not log_dir.is_dir():
  72. raise FileNotFoundError(f"推导日志目录不存在: {log_dir}")
  73. derivation_by_round = {}
  74. eval_by_round = {}
  75. for p in log_dir.glob("*.json"):
  76. base = p.stem
  77. m = re.match(r"^(\d+)_(推导|评估)$", base)
  78. if not m:
  79. continue
  80. round_num = int(m.group(1))
  81. with open(p, "r", encoding="utf-8") as f:
  82. content = json.load(f)
  83. if m.group(2) == "推导":
  84. derivation_by_round[round_num] = content
  85. else:
  86. eval_by_round[round_num] = content
  87. rounds = sorted(set(derivation_by_round) | set(eval_by_round))
  88. derivations = [derivation_by_round[r] for r in rounds if r in derivation_by_round]
  89. evals = [eval_by_round[r] for r in rounds if r in eval_by_round]
  90. return derivations, evals
  91. def build_derivation_result(
  92. topic_points: list[dict],
  93. derivations: list[dict],
  94. evals: list[dict],
  95. ) -> list[dict]:
  96. """
  97. 生成整体推导结果:每轮 轮次、推导成功的选题点、未推导成功的选题点、本次新推导成功的选题点。
  98. 选题点用 topic_points 中的完整信息;按 name 判定是否被推导(评估中的 match_post_point)。
  99. 若之前推导成功的选题点 is_fully_derived=false,本轮变为 is_fully_derived=true,则算本次新推导成功的选题点,
  100. 且 matched_score、is_fully_derived 在本轮后更新为该轮评估值。
  101. 推导成功的选题点:使用当前已更新的 best (matched_score, is_fully_derived)。
  102. 本次新推导成功的选题点:用当轮评估的 matched_score、is_fully_derived。
  103. 未推导成功的选题点:不包含 matched_score、is_fully_derived。
  104. """
  105. all_keys = {_topic_point_key(t) for t in topic_points}
  106. topic_by_key = {_topic_point_key(t): t for t in topic_points}
  107. # 分轮次收集 (round_num, name) -> (matched_score, is_fully_derived),同一轮同名保留 matched_score 最高的
  108. score_by_round_name: dict[tuple[int, str], tuple[float, bool]] = {}
  109. for round_idx, eval_data in enumerate(evals):
  110. round_num = eval_data.get("round", round_idx + 1)
  111. for er in eval_data.get("eval_results") or []:
  112. if not (er.get("is_matched") is True or er.get("match_result") == "匹配"):
  113. continue
  114. mp = (er.get("matched_post_point") or er.get("matched_post_topic") or er.get("match_post_point") or "").strip()
  115. if not mp:
  116. continue
  117. score = er.get("matched_score")
  118. if score is None:
  119. score = 1.0
  120. else:
  121. try:
  122. score = float(score)
  123. except (TypeError, ValueError):
  124. score = 1.0
  125. is_fully = er.get("is_fully_derived", True)
  126. key = (round_num, mp)
  127. if key not in score_by_round_name or score > score_by_round_name[key][0]:
  128. score_by_round_name[key] = (score, bool(is_fully))
  129. result = []
  130. derived_names_so_far: set[str] = set()
  131. fully_derived_names_so_far: set[str] = set() # 已出现过 is_fully_derived=true 的选题点
  132. best_score_by_name: dict[str, tuple[float, bool]] = {} # name -> (matched_score, is_fully_derived),遇 is_fully=true 时更新
  133. for i, (derivation, eval_data) in enumerate(zip(derivations, evals)):
  134. round_num = derivation.get("round", i + 1)
  135. eval_results = eval_data.get("eval_results") or []
  136. matched_post_points = set()
  137. for er in eval_results:
  138. if not (er.get("is_matched") is True or er.get("match_result") == "匹配"):
  139. continue
  140. mp = er.get("matched_post_point") or er.get("matched_post_topic") or er.get("match_post_point") or ""
  141. if mp and str(mp).strip():
  142. matched_post_points.add(str(mp).strip())
  143. # 本轮每个匹配名的 (score, is_fully)
  144. this_round_scores: dict[str, tuple[float, bool]] = {}
  145. for name in matched_post_points:
  146. val = score_by_round_name.get((round_num, name))
  147. if val is not None:
  148. this_round_scores[name] = val
  149. # 本次新推导成功:首次匹配 或 之前 is_fully=false 且本轮 is_fully=true
  150. new_derived_names = set()
  151. for name in matched_post_points:
  152. score, is_fully = this_round_scores.get(name, (None, False))
  153. if name not in derived_names_so_far:
  154. new_derived_names.add(name)
  155. elif name not in fully_derived_names_so_far and is_fully:
  156. new_derived_names.add(name)
  157. # 更新推导集合与 best:首次出现或本轮 is_fully=true 时更新 best
  158. derived_names_so_far |= matched_post_points
  159. for name in matched_post_points:
  160. val = this_round_scores.get(name)
  161. if val is None:
  162. continue
  163. score, is_fully = val
  164. if name not in best_score_by_name:
  165. best_score_by_name[name] = (score, is_fully)
  166. elif is_fully:
  167. best_score_by_name[name] = (score, is_fully)
  168. if is_fully:
  169. fully_derived_names_so_far.add(name)
  170. derived_keys = {k for k in all_keys if topic_by_key[k]["name"] in derived_names_so_far}
  171. new_derived_keys = {k for k in all_keys if topic_by_key[k]["name"] in new_derived_names}
  172. not_derived_keys = all_keys - derived_keys
  173. sort_derived = sorted(derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))
  174. sort_new = sorted(new_derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))
  175. sort_not = sorted(not_derived_keys, key=lambda k: (topic_by_key[k]["name"], k[1], k[2]))
  176. def add_score_fields(keys: set, sort_keys: list, round_for_score: int | None) -> list[dict]:
  177. """round_for_score: 用该轮评估的分数;若为 None 则不添加 score 字段。"""
  178. out = []
  179. for k in sort_keys:
  180. if k not in keys:
  181. continue
  182. obj = dict(topic_by_key[k])
  183. if round_for_score is not None:
  184. name = obj.get("name", "")
  185. val = score_by_round_name.get((round_for_score, name))
  186. if val is not None:
  187. obj["matched_score"] = val[0]
  188. obj["is_fully_derived"] = val[1]
  189. else:
  190. obj["matched_score"] = None
  191. obj["is_fully_derived"] = False
  192. out.append(obj)
  193. return out
  194. # 推导成功的选题点:用当前已更新的 best (matched_score, is_fully_derived)
  195. derived_list = []
  196. for k in sort_derived:
  197. if k not in derived_keys:
  198. continue
  199. obj = dict(topic_by_key[k])
  200. name = obj.get("name", "")
  201. val = best_score_by_name.get(name)
  202. if val is not None:
  203. obj["matched_score"] = val[0]
  204. obj["is_fully_derived"] = val[1]
  205. else:
  206. obj["matched_score"] = None
  207. obj["is_fully_derived"] = False
  208. derived_list.append(obj)
  209. new_list = add_score_fields(new_derived_keys, sort_new, round_for_score=round_num)
  210. not_derived_list = [dict(topic_by_key[k]) for k in sort_not] # 不带 matched_score、is_fully_derived
  211. result.append({
  212. "轮次": round_num,
  213. "推导成功的选题点": derived_list,
  214. "未推导成功的选题点": not_derived_list,
  215. "本次新推导成功的选题点": new_list,
  216. })
  217. return result
  218. def _tree_node_display_name(raw: str) -> str:
  219. """人设节点可能是 a.b.c 路径形式,实际需要的是最后一段节点名 c。"""
  220. s = (raw or "").strip()
  221. if "." in s:
  222. return s.rsplit(".", 1)[-1].strip() or s
  223. return s
  224. def _to_tree_node(name: str, extra: dict | None = None) -> dict:
  225. d = {"name": name}
  226. if extra:
  227. d.update(extra)
  228. return d
  229. def _to_pattern_node(pattern_name: str) -> dict:
  230. """将 pattern 字符串转为 input_pattern_nodes 的一项(简化版)。"""
  231. items = [x.strip() for x in pattern_name.replace("+", " ").split() if x.strip()]
  232. return {
  233. "items": [{"name": x, "point": "关键点", "dimension": "形式", "type": "标签"} for x in items],
  234. "match_items": items,
  235. }
  236. def build_visualize_edges(
  237. derivations: list[dict],
  238. evals: list[dict],
  239. topic_points: list[dict],
  240. ) -> tuple[list[dict], list[dict]]:
  241. """
  242. 生成 node_list(所有评估通过的帖子选题点)和 edge_list(只保留评估通过的推导路径)。
  243. - node_list:同一轮内节点不重复,重复时保留 matched_score 更高的;节点带 matched_score、is_fully_derived。
  244. - edge_list:边带 level(与 output 节点 level 一致);同一轮内 output 节点不重复;若前面轮次该节点匹配分更高则本轮不保留该节点。
  245. 评估数据支持 path_id(对应推导 derivation_results[].id)、item_id(output 中元素从 1 起的序号)、matched_score、is_fully_derived。
  246. """
  247. derivations = sorted(derivations, key=lambda d: d.get("round", 0))
  248. evals = sorted(evals, key=lambda e: e.get("round", 0))
  249. topic_by_name = {t["name"]: t for t in topic_points}
  250. # 评估匹配:(round_num, path_id, item_id) -> (matched_post_point, matched_reason, matched_score, is_fully_derived)
  251. # path_id = 推导中 derivation_results[].id,item_id = output 中元素从 1 起的序号
  252. match_by_path_item: dict[tuple[int, int, int], tuple[str, str, float, bool]] = {}
  253. match_by_round_output: dict[tuple[int, str], tuple[str, str, float, bool]] = {} # 兼容无 path_id/item_id
  254. for round_idx, eval_data in enumerate(evals):
  255. round_num = eval_data.get("round", round_idx + 1)
  256. for er in eval_data.get("eval_results") or []:
  257. if not (er.get("is_matched") is True or er.get("match_result") == "匹配"):
  258. continue
  259. mp = (er.get("matched_post_point") or er.get("matched_post_topic") or er.get("match_post_point") or "").strip()
  260. if not mp:
  261. continue
  262. out_point = (er.get("derivation_output_point") or "").strip()
  263. reason = (er.get("matched_reason") or er.get("match_reason") or "").strip()
  264. score = er.get("matched_score")
  265. if score is None:
  266. score = 1.0
  267. else:
  268. try:
  269. score = float(score)
  270. except (TypeError, ValueError):
  271. score = 1.0
  272. is_fully = er.get("is_fully_derived", True)
  273. val = (mp, reason, score, bool(is_fully))
  274. path_id = er.get("path_id")
  275. item_id = er.get("item_id")
  276. if path_id is not None and item_id is not None:
  277. try:
  278. match_by_path_item[(round_num, int(path_id), int(item_id))] = val
  279. except (TypeError, ValueError):
  280. pass
  281. if out_point:
  282. k = (round_num, out_point)
  283. if k not in match_by_round_output:
  284. match_by_round_output[k] = val
  285. # 按 (round_num, mp) 收集节点候选,同轮同节点保留 matched_score 最高的一条
  286. node_candidates: dict[tuple[int, str], dict] = {} # (round_num, mp) -> node_dict (含 score, is_fully_derived)
  287. def get_match(round_num: int, path_id: int | None, item_id: int | None, out_item: str) -> tuple[str, str, float, bool] | None:
  288. if path_id is not None and item_id is not None:
  289. v = match_by_path_item.get((round_num, path_id, item_id))
  290. if v is not None:
  291. return v
  292. return match_by_round_output.get((round_num, out_item))
  293. edge_list = []
  294. round_output_seen: set[tuple[int, str]] = set() # (round_num, node_name) 本轮已作为某边的 output
  295. best_score_by_node: dict[str, float] = {} # node_name -> 已出现过的最高 matched_score
  296. fully_derived_nodes: set[str] = set()
  297. for round_idx, derivation in enumerate(derivations):
  298. round_num = derivation.get("round", round_idx + 1)
  299. for dr in derivation.get("derivation_results") or []:
  300. output_list = dr.get("output") or []
  301. path_id = dr.get("id")
  302. matched: list[tuple[str, str, float, bool, str]] = [] # (mp, reason, score, is_fully, derivation_out)
  303. for i, out_item in enumerate(output_list):
  304. item_id = i + 1
  305. v = get_match(round_num, path_id, item_id, out_item)
  306. if not v:
  307. continue
  308. mp, reason, score, is_fully = v
  309. matched.append((mp, reason, score, is_fully, out_item))
  310. if not matched:
  311. continue
  312. # 同一轮内 output 节点不重复;若前面轮次该节点匹配分更高则本轮不保留
  313. output_names_this_edge = []
  314. for mp, reason, score, is_fully, out_item in matched:
  315. if (round_num, mp) in round_output_seen:
  316. continue
  317. if mp in fully_derived_nodes:
  318. continue
  319. if score <= best_score_by_node.get(mp, -1.0):
  320. continue
  321. output_names_this_edge.append((mp, reason, score, is_fully, out_item))
  322. if not output_names_this_edge:
  323. continue
  324. for mp, _r, score, _f, _o in output_names_this_edge:
  325. round_output_seen.add((round_num, mp))
  326. best_score_by_node[mp] = max(best_score_by_node.get(mp, -1.0), score)
  327. # 节点候选:同轮同节点保留匹配分更高的
  328. for mp, _reason, score, is_fully, out_item in output_names_this_edge:
  329. key = (round_num, mp)
  330. if key not in node_candidates or node_candidates[key].get("matched_score", 0) < score:
  331. node = dict(topic_by_name.get(mp, {"name": mp, "point": "", "dimension": "", "root_source": "", "root_sources_desc": ""}))
  332. node["level"] = round_num
  333. node.setdefault("original_word", node.get("name", mp))
  334. node["derivation_type"] = dr.get("method", "")
  335. node["matched_score"] = score
  336. node["is_fully_derived"] = is_fully
  337. # 对应评估中的 derivation_output_point
  338. node["derivation_output_point"] = out_item
  339. node_candidates[key] = node
  340. input_data = dr.get("input") or {}
  341. derived_nodes = input_data.get("derived_nodes") or []
  342. tree_nodes = input_data.get("tree_nodes") or []
  343. patterns = input_data.get("patterns") or []
  344. input_post_nodes = [{"name": x} for x in derived_nodes]
  345. input_tree_nodes = [_to_tree_node(_tree_node_display_name(x)) for x in tree_nodes]
  346. if patterns and isinstance(patterns[0], str):
  347. input_pattern_nodes = [_to_pattern_node(p) for p in patterns]
  348. elif patterns and isinstance(patterns[0], dict):
  349. input_pattern_nodes = patterns
  350. else:
  351. input_pattern_nodes = []
  352. output_nodes = []
  353. reasons_list = []
  354. derivation_points_list = []
  355. for mp, reason, score, is_fully, out_item in output_names_this_edge:
  356. output_nodes.append({"name": mp, "matched_score": score, "is_fully_derived": is_fully})
  357. reasons_list.append(reason)
  358. derivation_points_list.append(out_item)
  359. detail = {
  360. "reason": dr.get("reason", ""),
  361. "评估结果": "匹配成功",
  362. }
  363. if any(reasons_list):
  364. detail["匹配理由"] = reasons_list
  365. detail["待比对的推导选题点"] = derivation_points_list
  366. if dr.get("tools"):
  367. detail["tools"] = dr["tools"]
  368. edge_list.append({
  369. "name": dr.get("method", "") or f"推导-{round_num}",
  370. "level": round_num,
  371. "input_post_nodes": input_post_nodes,
  372. "input_tree_nodes": input_tree_nodes,
  373. "input_pattern_nodes": input_pattern_nodes,
  374. "output_nodes": output_nodes,
  375. "detail": detail,
  376. })
  377. for (rn, name), nd in node_candidates.items():
  378. if rn == round_num and nd.get("is_fully_derived"):
  379. fully_derived_nodes.add(name)
  380. node_list = list(node_candidates.values())
  381. return node_list, edge_list
  382. def _find_project_root() -> Path:
  383. """从脚本所在目录向上查找包含 .git 的项目根目录。"""
  384. p = Path(__file__).resolve().parent
  385. while p != p.parent:
  386. if (p / ".git").is_dir():
  387. return p
  388. p = p.parent
  389. return Path(__file__).resolve().parent
  390. def generate_visualize_data(account_name: str, post_id: str, log_id: str, base_dir: Path | None = None) -> None:
  391. """
  392. 主流程:读取解构内容与推导日志,生成整体推导结果与整体推导路径可视化两个 JSON。
  393. base_dir 默认为脚本所在目录;若其下 output/.../推导日志 不存在,则尝试项目根目录下的 output/...(兼容从项目根运行)。
  394. """
  395. if base_dir is None:
  396. base_dir = Path(__file__).resolve().parent
  397. input_dir = base_dir / "input" / account_name / "原始数据" / "解构内容"
  398. log_dir = base_dir / "output" / account_name / "推导日志" / post_id / log_id
  399. result_dir = base_dir / "output" / account_name / "整体推导结果"
  400. visualize_dir = base_dir / "output" / account_name / "整体推导路径可视化"
  401. # 兼容:若推导日志不在 base_dir 下,尝试项目根目录下的 output/
  402. if not log_dir.is_dir():
  403. project_root = _find_project_root()
  404. if project_root != base_dir:
  405. alt_log = project_root / "output" / account_name / "推导日志" / post_id / log_id
  406. if alt_log.is_dir():
  407. log_dir = alt_log
  408. result_dir = project_root / "output" / account_name / "整体推导结果"
  409. visualize_dir = project_root / "output" / account_name / "整体推导路径可视化"
  410. deconstruct_path = input_dir / f"{post_id}.json"
  411. topic_points = parse_topic_points_from_deconstruct(deconstruct_path)
  412. derivations, evals = load_derivation_logs(log_dir)
  413. if not derivations or not evals:
  414. raise ValueError(f"推导或评估数据为空: {log_dir}")
  415. # 2.1 整体推导结果
  416. derivation_result = build_derivation_result(topic_points, derivations, evals)
  417. result_dir.mkdir(parents=True, exist_ok=True)
  418. result_path = result_dir / f"{post_id}.json"
  419. with open(result_path, "w", encoding="utf-8") as f:
  420. json.dump(derivation_result, f, ensure_ascii=False, indent=4)
  421. print(f"已写入整体推导结果: {result_path}")
  422. # 2.2 整体推导路径可视化
  423. node_list, edge_list = build_visualize_edges(derivations, evals, topic_points)
  424. visualize_path = visualize_dir / f"{post_id}.json"
  425. visualize_dir.mkdir(parents=True, exist_ok=True)
  426. with open(visualize_path, "w", encoding="utf-8") as f:
  427. json.dump({"node_list": node_list, "edge_list": edge_list}, f, ensure_ascii=False, indent=4)
  428. print(f"已写入整体推导路径可视化: {visualize_path}")
  429. def main(account_name, post_id, log_id):
  430. # parser = argparse.ArgumentParser(description="生成推导可视化数据")
  431. # parser.add_argument("account_name", help="账号名,如 家有大志")
  432. # parser.add_argument("post_id", help="帖子 ID")
  433. # parser.add_argument("log_id", help="推导日志 ID,如 20260303204232")
  434. # parser.add_argument("--base-dir", type=Path, default=None, help="项目根目录,默认为本脚本所在目录")
  435. # args = parser.parse_args()
  436. generate_visualize_data(account_name=account_name, post_id=post_id, log_id=log_id)
  437. if __name__ == "__main__":
  438. account_name="家有大志"
  439. post_id = "68fb6a5c000000000302e5de"
  440. log_id="20260317112639"
  441. main(account_name, post_id, log_id)