sug_v6_1_2_3.py 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908
  1. import asyncio
  2. import json
  3. import os
  4. import sys
  5. import argparse
  6. from datetime import datetime
  7. from agents import Agent, Runner
  8. from lib.my_trace import set_trace
  9. from typing import Literal
  10. from pydantic import BaseModel, Field
  11. from lib.utils import read_file_as_string
  12. from lib.client import get_model
# Model identifier passed to get_model() for every agent defined below.
MODEL_NAME = "google/gemini-2.5-flash"
  14. from script.search_recommendations.xiaohongshu_search_recommendations import XiaohongshuSearchRecommendations
  15. from script.search.xiaohongshu_search import XiaohongshuSearch
class RunContext(BaseModel):
    """Per-run state container: inputs, step-by-step logs, exploration
    history, and final results for one optimization run."""

    version: str = Field(..., description="当前运行的脚本版本(文件名)")
    input_files: dict[str, str] = Field(..., description="输入文件路径映射")
    # Question text: with context, context only, and the bare question.
    q_with_context: str
    q_context: str
    q: str
    # Trace URL and local directory where this run's logs/artifacts are written.
    log_url: str
    log_dir: str
    # Step-by-step log (filled by add_step(); each entry carries
    # step_number / step_name / step_type / timestamp / data).
    steps: list[dict] = Field(default_factory=list, description="执行步骤的详细记录")
    # Exploration-phase records (kept for backward compatibility).
    keywords: list[str] | None = Field(default=None, description="提取的关键词")
    exploration_levels: list[dict] = Field(default_factory=list, description="每一层的探索结果")
    level_analyses: list[dict] = Field(default_factory=list, description="每一层的主Agent分析")
    # Final results.
    final_candidates: list[str] | None = Field(default=None, description="最终选出的候选query")
    evaluation_results: list[dict] | None = Field(default=None, description="候选query的评估结果")
    optimization_result: dict | None = Field(default=None, description="最终优化结果对象")
    final_output: str | None = Field(default=None, description="最终输出结果(格式化文本)")
  35. # ============================================================================
  36. # Agent 1: 关键词提取专家
  37. # ============================================================================
# Prompt for Agent 1 (keyword extraction): split the question into the
# finest-grained meaningful search concepts; output parsed as KeywordList.
# Prompt text itself is runtime data and stays in Chinese.
keyword_extraction_instructions = """
你是关键词提取专家。给定一个搜索问题(含上下文),提取出**最细粒度的关键概念**。
## 提取原则
1. **细粒度优先**:拆分成最小的有意义单元
- 不要保留完整的长句
- 拆分成独立的、有搜索意义的词或短语
2. **保留核心维度**:
- 地域/对象
- 时间
- 行为/意图:获取、教程、推荐、如何等
- 主题/领域
- 质量/属性
3. **去掉无意义的虚词**:的、吗、呢等
4. **保留领域专有词**:不要过度拆分专业术语
- 如果是常见的组合词,保持完整
## 输出要求
输出关键词列表,按重要性排序(最核心的在前)。
""".strip()
class KeywordList(BaseModel):
    """Structured output of the keyword-extraction agent."""
    keywords: list[str] = Field(..., description="提取的关键词,按重要性排序")
    reasoning: str = Field(..., description="提取理由")
# Agent 1: keyword extraction; structured output is parsed into KeywordList.
keyword_extractor = Agent[None](
    name="关键词提取专家",
    instructions=keyword_extraction_instructions,
    model=get_model(MODEL_NAME),
    output_type=KeywordList,
)
  66. # ============================================================================
  67. # Agent 2: 层级探索分析专家
  68. # ============================================================================
# Prompt for Agent 2 (level analysis): given all explored levels, decide
# whether to evaluate candidates now or which combinations to explore next;
# output parsed as LevelAnalysis. Prompt text stays in Chinese.
level_analysis_instructions = """
你是搜索空间探索分析专家。基于当前层级的探索结果,决定下一步行动。
## 你的任务
分析当前已探索的词汇空间,判断:
1. **发现了什么有价值的信号?**
2. **是否已经可以评估候选了?**
3. **如果还不够,下一层应该探索什么组合?**
## 分析维度
### 1. 信号识别(最重要)
看推荐词里**出现了什么主题**:
**关键问题:**
- 哪些推荐词**最接近原始需求**?
- 哪些推荐词**揭示了有价值的方向**(即使不完全匹配)?
- 哪些推荐词可以作为**下一层探索的桥梁**?
- 系统对哪些概念理解得好?哪些理解偏了?
### 2. 组合策略
基于发现的信号,设计下一层探索:
**组合类型:**
a) **关键词直接组合**
- 两个关键词组合成新query
b) **利用推荐词作为桥梁**(重要!)
- 发现某个推荐词很有价值 → 直接探索这个推荐词
- 或在推荐词基础上加其他关键词
c) **跨层级组合**
- 结合多层发现的有价值推荐词
- 组合成更复杂的query
### 3. 停止条件
**何时可以评估候选?**
满足以下之一:
- 推荐词中出现了**明确包含原始需求多个核心要素的query**
- 已经探索到**足够复杂的组合**(3-4个关键词),且推荐词相关
- 探索了**3-4层**,信息已经足够丰富
**何时继续探索?**
- 当前推荐词太泛,没有接近原始需求
- 发现了有价值的信号,但需要进一步组合验证
- 层数还少(< 3层)
## 输出要求
### 1. key_findings
总结当前层发现的关键信息,包括:
- 哪些推荐词最有价值?
- 系统对哪些概念理解得好/不好?
- 发现了什么意外的方向?
### 2. promising_signals
列出最有价值的推荐词(来自任何已探索的query),每个说明为什么有价值
### 3. should_evaluate_now
是否已经可以开始评估候选了?true/false
### 4. candidates_to_evaluate
如果should_evaluate_now=true,列出应该评估的候选query
- 可以是推荐词
- 可以是自己构造的组合
### 5. next_combinations
如果should_evaluate_now=false,列出下一层应该探索的query组合
### 6. reasoning
详细的推理过程
## 重要原则
1. **不要过早评估**:至少探索2层,除非第一层就发现了完美匹配
2. **充分利用推荐词**:推荐词是系统给的提示,要善用
3. **保持探索方向的多样性**:不要只盯着一个方向
4. **识别死胡同**:如果某个方向的推荐词一直不相关,果断放弃
""".strip()
class PromisingSignal(BaseModel):
    """A suggestion flagged as valuable during exploration."""
    query: str = Field(..., description="推荐词")
    from_level: int = Field(..., description="来自哪一层")
    reason: str = Field(..., description="为什么有价值")
class LevelAnalysis(BaseModel):
    """Structured output of the level-analysis agent: findings plus the
    decision to either evaluate candidates now or explore further."""
    key_findings: str = Field(..., description="当前层的关键发现")
    promising_signals: list[PromisingSignal] = Field(..., description="有价值的推荐词信号")
    should_evaluate_now: bool = Field(..., description="是否应该开始评估候选")
    candidates_to_evaluate: list[str] = Field(default_factory=list, description="如果should_evaluate_now=true,要评估的候选query列表")
    next_combinations: list[str] = Field(default_factory=list, description="如果should_evaluate_now=false,下一层要探索的query组合")
    reasoning: str = Field(..., description="详细的推理过程")
# Agent 2: level analysis; structured output is parsed into LevelAnalysis.
level_analyzer = Agent[None](
    name="层级探索分析专家",
    instructions=level_analysis_instructions,
    model=get_model(MODEL_NAME),
    output_type=LevelAnalysis,
)
  148. # ============================================================================
  149. # Agent 3: 评估专家(简化版:意图匹配 + 相关性评分)
  150. # ============================================================================
# Prompt for Agent 3 (candidate evaluation): score a suggested query on
# intent match (bool) and relevance (0-1); output parsed as
# RelevanceEvaluation. Prompt text stays in Chinese.
eval_instructions = """
你是搜索query评估专家。给定原始问题和推荐query,评估两个维度。
## 评估目标
用这个推荐query搜索,能否找到满足原始需求的内容?
## 两层评分
### 1. intent_match(意图匹配)= true/false
推荐query的**使用意图**是否与原问题一致?
**核心问题:用户搜索这个推荐词,想做什么?**
**判断标准:**
- 原问题意图:找方法?找教程?找资源/素材?找工具?看作品?
- 推荐词意图:如果用户搜索这个词,他的目的是什么?
**示例:**
- 原问题意图="找素材"
- ✅ true: "素材下载"、"素材网站"、"免费素材"(都是获取素材)
- ❌ false: "素材制作教程"、"如何制作素材"(意图变成学习了)
- 原问题意图="学教程"
- ✅ true: "教程视频"、"教学步骤"、"入门指南"
- ❌ false: "成品展示"、"作品欣赏"(意图变成看作品了)
**评分:**
- true = 意图一致,搜索推荐词能达到原问题的目的
- false = 意图改变,搜索推荐词无法达到原问题的目的
### 2. relevance_score(相关性)= 0-1 连续分数
推荐query在**主题、要素、属性**上与原问题的相关程度?
**评估维度:**
- 主题相关:核心主题是否匹配?(如:摄影、旅游、美食)
- 要素覆盖:关键要素保留了多少?(如:地域、时间、对象、工具)
- 属性匹配:质量、风格、特色等属性是否保留?
**评分参考:**
- 0.9-1.0 = 几乎完美匹配,所有核心要素都保留
- 0.7-0.8 = 高度相关,核心要素保留,少数次要要素缺失
- 0.5-0.6 = 中度相关,主题匹配但多个要素缺失
- 0.3-0.4 = 低度相关,只有部分主题相关
- 0-0.2 = 基本不相关
## 评估策略
1. **先判断 intent_match**:意图不匹配直接 false,无论相关性多高
2. **再评估 relevance_score**:在意图匹配的前提下,计算相关性
## 输出要求
- intent_match: true/false
- relevance_score: 0-1 的浮点数
- reason: 详细的评估理由,需要说明:
- 原问题的意图是什么
- 推荐词的意图是什么
- 为什么判断意图匹配/不匹配
- 相关性分数的依据(哪些要素保留/缺失)
""".strip()
class RelevanceEvaluation(BaseModel):
    """Structured output of the candidate evaluator: intent match + relevance."""
    intent_match: bool = Field(..., description="意图是否匹配")
    relevance_score: float = Field(..., description="相关性分数 0-1,分数越高越相关")
    reason: str = Field(..., description="评估理由,需说明意图判断和相关性依据")
# Agent 3: candidate evaluation; structured output is RelevanceEvaluation.
evaluator = Agent[None](
    name="评估专家",
    instructions=eval_instructions,
    model=get_model(MODEL_NAME),
    output_type=RelevanceEvaluation,
)
  207. # ============================================================================
  208. # Agent 4: 单个帖子需求满足度评估专家
  209. # ============================================================================
# Prompt for Agent 4 (per-note evaluation): judge whether a single note
# (title + description) can satisfy the original need; output parsed as
# NoteEvaluation. Prompt text stays in Chinese.
note_evaluation_instructions = """
你是帖子需求满足度评估专家。给定原始问题和一个搜索到的帖子(标题+描述),判断这个帖子能否满足用户的需求。
## 你的任务
评估单个帖子的标题和描述,判断用户点开这个帖子后,能否找到满足原始需求的内容。
## 评估维度
### 1. 标题相关性(title_relevance)= 0-1 连续分数
**评估标准:**
- 标题是否与原始问题的主题相关?
- 标题是否包含原始问题的关键要素?
- 标题是否表明内容能解决用户的问题?
**评分参考:**
- 0.9-1.0 = 标题高度相关,明确表明能解决问题
- 0.7-0.8 = 标题相关,包含核心要素
- 0.5-0.6 = 标题部分相关,有关联但不明确
- 0.3-0.4 = 标题相关性较低
- 0-0.2 = 标题基本不相关
### 2. 内容预期(content_expectation)= 0-1 连续分数
**评估标准:**
- 从描述看,内容是否可能包含用户需要的信息?
- 描述是否展示了相关的要素或细节?
- 描述的方向是否与用户需求一致?
**评分参考:**
- 0.9-1.0 = 描述明确表明内容高度符合需求
- 0.7-0.8 = 描述显示内容可能符合需求
- 0.5-0.6 = 描述有一定相关性,但不确定
- 0.3-0.4 = 描述相关性较低
- 0-0.2 = 描述基本不相关
### 3. 需求满足度(need_satisfaction)= true/false
**核心问题:用户点开这个帖子后,能否找到他需要的内容?**
**判断标准:**
- 综合标题和描述,内容是否大概率能满足需求?
- 如果 title_relevance >= 0.7 且 content_expectation >= 0.6,一般判断为 true
- 否则判断为 false
### 4. 综合置信度(confidence_score)= 0-1 连续分数
**计算方式:**
- 可以是 title_relevance 和 content_expectation 的加权平均
- 标题权重通常更高(如 0.6 * title + 0.4 * content)
## 输出要求
- title_relevance: 0-1 的浮点数
- content_expectation: 0-1 的浮点数
- need_satisfaction: true/false
- confidence_score: 0-1 的浮点数
- reason: 详细的评估理由,需要说明:
- 标题与原问题的相关性分析
- 描述的内容预期分析
- 为什么判断能/不能满足需求
- 置信度分数的依据
## 重要原则
1. **独立评估**:只评估这一个帖子,不考虑其他帖子
2. **用户视角**:问"我会点开这个帖子吗?点开后能找到答案吗?"
3. **标题优先**:标题是用户决定是否点击的关键
4. **保守判断**:不确定时,倾向于给较低的分数
""".strip()
class NoteEvaluation(BaseModel):
    """Structured output of the per-note evaluator."""
    title_relevance: float = Field(..., description="标题相关性 0-1")
    content_expectation: float = Field(..., description="内容预期 0-1")
    need_satisfaction: bool = Field(..., description="是否满足需求")
    confidence_score: float = Field(..., description="综合置信度 0-1")
    reason: str = Field(..., description="详细的评估理由")
# Agent 4: per-note evaluation; structured output is NoteEvaluation.
note_evaluator = Agent[None](
    name="帖子需求满足度评估专家",
    instructions=note_evaluation_instructions,
    model=get_model(MODEL_NAME),
    output_type=NoteEvaluation,
)
  276. # ============================================================================
  277. # Agent 5: 答案生成专家
  278. # ============================================================================
# Prompt for Agent 5 (answer generation): synthesize a cited Markdown answer
# from the satisfying notes; output parsed as AnswerGeneration. Prompt text
# stays in Chinese.
answer_generation_instructions = """
你是答案生成专家。基于一组满足需求的帖子,为原始问题生成一个全面、准确、有价值的答案。
## 你的任务
根据用户的原始问题和一组相关帖子(包含标题、描述、置信度评分),生成一个高质量的答案。
## 输入信息
1. **原始问题**:用户提出的具体问题
2. **相关帖子列表**:每个帖子包含
- 序号(索引)
- 标题
- 描述
- 置信度分数
## 答案要求
### 1. 内容要求
- **直接回答问题**:开门见山,第一段就给出核心答案
- **结构清晰**:使用标题、列表、分段等组织内容
- **综合多个来源**:整合多个帖子的信息,不要只依赖一个
- **信息准确**:基于帖子内容,不要编造信息
- **实用性**:提供可操作的建议或具体的信息
### 2. 引用规范
- **必须标注来源**:每个关键信息都要标注帖子索引
- **引用格式**:使用 `[1]`、`[2]` 等标注帖子序号
- **多来源引用**:如果多个帖子支持同一观点,使用 `[1,2,3]`
- **引用位置**:在相关句子或段落的末尾标注
### 3. 置信度使用
- **优先高置信度**:优先引用置信度高的帖子
- **交叉验证**:如果多个帖子提到相同信息,可以提高可信度
- **标注不确定性**:如果信息来自低置信度帖子,适当标注
### 4. 答案结构建议
```
【核心答案】
直接回答用户的问题,给出最核心的信息。[引用]
【详细说明】
1. 第一个方面/要点 [引用]
- 具体内容
- 相关细节
2. 第二个方面/要点 [引用]
- 具体内容
- 相关细节
【补充建议/注意事项】(可选)
其他有价值的信息或提醒。[引用]
【参考帖子】
列出所有引用的帖子编号和标题。
```
## 输出格式
{
"answer": "生成的答案内容(Markdown格式)",
"cited_note_indices": [1, 2, 3], # 引用的帖子序号列表
"confidence": 0.85, # 答案的整体置信度 (0-1)
"summary": "一句话总结答案的核心内容"
}
## 重要原则
1. **忠于原文**:不要添加帖子中没有的信息
2. **引用透明**:让用户知道每个信息来自哪个帖子
3. **综合性**:尽可能整合多个帖子的信息
4. **可读性**:使用清晰的Markdown格式
5. **质量优先**:如果帖子质量不够,可以说明信息有限
""".strip()
class AnswerGeneration(BaseModel):
    """Structured output of the answer-generation agent."""
    answer: str = Field(..., description="生成的答案内容(Markdown格式)")
    cited_note_indices: list[int] = Field(..., description="引用的帖子序号列表")
    confidence: float = Field(..., description="答案的整体置信度 0-1")
    summary: str = Field(..., description="一句话总结答案的核心内容")
# Agent 5: answer generation; structured output is AnswerGeneration.
answer_generator = Agent[None](
    name="答案生成专家",
    instructions=answer_generation_instructions,
    model=get_model(MODEL_NAME),
    output_type=AnswerGeneration,
)
  348. # ============================================================================
  349. # 日志辅助函数
  350. # ============================================================================
  351. def add_step(context: RunContext, step_name: str, step_type: str, data: dict):
  352. """添加步骤记录"""
  353. step = {
  354. "step_number": len(context.steps) + 1,
  355. "step_name": step_name,
  356. "step_type": step_type,
  357. "timestamp": datetime.now().isoformat(),
  358. "data": data
  359. }
  360. context.steps.append(step)
  361. return step
  362. def process_note_data(note: dict) -> dict:
  363. """
  364. 处理搜索接口返回的帖子数据,标准化为统一格式
  365. Args:
  366. note: 搜索接口返回的原始帖子数据
  367. Returns:
  368. 标准化后的帖子数据字典
  369. """
  370. note_card = note.get("note_card", {})
  371. image_list = note_card.get("image_list", []) # 已在搜索API层预处理为URL字符串列表
  372. interact_info = note_card.get("interact_info", {})
  373. user_info = note_card.get("user", {})
  374. return {
  375. "note_id": note.get("id", ""),
  376. "title": note_card.get("display_title", ""),
  377. "desc": note_card.get("desc", ""),
  378. "image_list": image_list, # 第一张就是封面,已在XiaohongshuSearch.search()中预处理为URL字符串列表
  379. "interact_info": {
  380. "liked_count": interact_info.get("liked_count", 0),
  381. "collected_count": interact_info.get("collected_count", 0),
  382. "comment_count": interact_info.get("comment_count", 0),
  383. "shared_count": interact_info.get("shared_count", 0)
  384. },
  385. "user": {
  386. "nickname": user_info.get("nickname", ""),
  387. "user_id": user_info.get("user_id", "")
  388. },
  389. "type": note_card.get("type", "normal"),
  390. "note_url": f"https://www.xiaohongshu.com/explore/{note.get('id', '')}"
  391. }
  392. # ============================================================================
  393. # 核心函数
  394. # ============================================================================
  395. async def extract_keywords(q: str, context: RunContext) -> KeywordList:
  396. """提取关键词"""
  397. print("\n[步骤 1] 正在提取关键词...")
  398. result = await Runner.run(keyword_extractor, q)
  399. keyword_list: KeywordList = result.final_output
  400. print(f"提取的关键词:{keyword_list.keywords}")
  401. print(f"提取理由:{keyword_list.reasoning}")
  402. # 记录步骤
  403. add_step(context, "提取关键词", "keyword_extraction", {
  404. "input_question": q,
  405. "keywords": keyword_list.keywords,
  406. "reasoning": keyword_list.reasoning
  407. })
  408. return keyword_list
  409. async def explore_level(queries: list[str], level_num: int, context: RunContext) -> dict:
  410. """探索一个层级(并发获取所有query的推荐词)"""
  411. step_num = len(context.steps) + 1
  412. print(f"\n{'='*60}")
  413. print(f"[步骤 {step_num}] Level {level_num} 探索:{len(queries)} 个query")
  414. print(f"{'='*60}")
  415. xiaohongshu_api = XiaohongshuSearchRecommendations()
  416. # 并发获取所有推荐词
  417. async def get_single_sug(query: str):
  418. print(f" 探索: {query}")
  419. suggestions = xiaohongshu_api.get_recommendations(keyword=query)
  420. print(f" → {len(suggestions) if suggestions else 0} 个推荐词")
  421. return {
  422. "query": query,
  423. "suggestions": suggestions or []
  424. }
  425. results = await asyncio.gather(*[get_single_sug(q) for q in queries])
  426. level_data = {
  427. "level": level_num,
  428. "timestamp": datetime.now().isoformat(),
  429. "queries": results
  430. }
  431. context.exploration_levels.append(level_data)
  432. # 记录步骤
  433. add_step(context, f"Level {level_num} 探索", "level_exploration", {
  434. "level": level_num,
  435. "input_queries": queries,
  436. "query_count": len(queries),
  437. "results": results,
  438. "total_suggestions": sum(len(r['suggestions']) for r in results)
  439. })
  440. return level_data
async def analyze_level(level_data: dict, all_levels: list[dict], original_question: str, context: RunContext) -> LevelAnalysis:
    """Run the level-analysis agent on the current exploration state.

    Args:
        level_data: the current level's exploration result (level/queries).
        all_levels: all explored levels so far (JSON-serializable dicts).
        original_question: the user's original question.
        context: run context receiving the analysis record and step log.

    Returns:
        The agent's LevelAnalysis decision (evaluate now vs. explore next).
    """
    step_num = len(context.steps) + 1
    print(f"\n[步骤 {step_num}] 正在分析 Level {level_data['level']}...")
    # Build the agent input; the prompt body is runtime text in Chinese.
    analysis_input = f"""
<原始问题>
{original_question}
</原始问题>
<已探索的所有层级>
{json.dumps(all_levels, ensure_ascii=False, indent=2)}
</已探索的所有层级>
<当前层级>
Level {level_data['level']}
{json.dumps(level_data['queries'], ensure_ascii=False, indent=2)}
</当前层级>
请分析当前探索状态,决定下一步行动。
"""
    result = await Runner.run(level_analyzer, analysis_input)
    analysis: LevelAnalysis = result.final_output
    print(f"\n分析结果:")
    print(f" 关键发现:{analysis.key_findings}")
    print(f" 有价值的信号:{len(analysis.promising_signals)} 个")
    print(f" 是否评估:{analysis.should_evaluate_now}")
    if analysis.should_evaluate_now:
        print(f" 候选query:{analysis.candidates_to_evaluate}")
    else:
        print(f" 下一层探索:{analysis.next_combinations}")
    # Persist the analysis result (kept for backward compatibility).
    context.level_analyses.append({
        "level": level_data['level'],
        "timestamp": datetime.now().isoformat(),
        "analysis": analysis.model_dump()
    })
    # Record the step in the structured run log.
    add_step(context, f"Level {level_data['level']} 分析", "level_analysis", {
        "level": level_data['level'],
        "key_findings": analysis.key_findings,
        "promising_signals_count": len(analysis.promising_signals),
        "promising_signals": [s.model_dump() for s in analysis.promising_signals],
        "should_evaluate_now": analysis.should_evaluate_now,
        "candidates_to_evaluate": analysis.candidates_to_evaluate if analysis.should_evaluate_now else [],
        "next_combinations": analysis.next_combinations if not analysis.should_evaluate_now else [],
        "reasoning": analysis.reasoning
    })
    return analysis
  487. async def evaluate_candidates(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
  488. """评估候选query(含实际搜索验证)"""
  489. step_num = len(context.steps) + 1
  490. print(f"\n{'='*60}")
  491. print(f"[步骤 {step_num}] 评估 {len(candidates)} 个候选query")
  492. print(f"{'='*60}")
  493. xiaohongshu_api = XiaohongshuSearchRecommendations()
  494. xiaohongshu_search = XiaohongshuSearch()
  495. # 创建搜索结果保存目录
  496. search_results_dir = os.path.join(context.log_dir, "search_results")
  497. os.makedirs(search_results_dir, exist_ok=True)
  498. async def evaluate_single_candidate(candidate: str, candidate_index: int):
  499. print(f"\n评估候选:{candidate}")
  500. # 为当前候选创建子目录
  501. # 清理候选名称,移除不适合作为目录名的字符
  502. safe_candidate_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in candidate)
  503. candidate_dir = os.path.join(search_results_dir, f"candidate_{candidate_index+1}_{safe_candidate_name[:50]}")
  504. os.makedirs(candidate_dir, exist_ok=True)
  505. # 1. 获取推荐词
  506. suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
  507. print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
  508. if not suggestions:
  509. return {
  510. "candidate": candidate,
  511. "suggestions": [],
  512. "evaluations": []
  513. }
  514. # 2. 评估每个推荐词(意图匹配 + 相关性)
  515. async def eval_single_sug(sug: str, sug_index: int):
  516. # 2.1 先进行意图和相关性评估
  517. eval_input = f"""
  518. <原始问题>
  519. {original_question}
  520. </原始问题>
  521. <待评估的推荐query>
  522. {sug}
  523. </待评估的推荐query>
  524. 请评估该推荐query:
  525. 1. intent_match: 意图是否匹配(true/false)
  526. 2. relevance_score: 相关性分数(0-1)
  527. 3. reason: 详细的评估理由
  528. """
  529. result = await Runner.run(evaluator, eval_input)
  530. evaluation: RelevanceEvaluation = result.final_output
  531. eval_result = {
  532. "query": sug,
  533. "intent_match": evaluation.intent_match,
  534. "relevance_score": evaluation.relevance_score,
  535. "reason": evaluation.reason,
  536. }
  537. # 2.2 如果意图匹配且相关性足够高,进行实际搜索验证
  538. if evaluation.intent_match and evaluation.relevance_score >= 0.7:
  539. print(f" → 合格候选,进行实际搜索验证: {sug}")
  540. try:
  541. search_result = xiaohongshu_search.search(keyword=sug)
  542. # 解析result字段(它是JSON字符串,需要先解析)
  543. result_str = search_result.get("result", "{}")
  544. if isinstance(result_str, str):
  545. result_data = json.loads(result_str)
  546. else:
  547. result_data = result_str
  548. # 格式化搜索结果:将result字段解析后再保存
  549. formatted_search_result = {
  550. "success": search_result.get("success"),
  551. "result": result_data, # 保存解析后的数据
  552. "tool_name": search_result.get("tool_name"),
  553. "call_type": search_result.get("call_type"),
  554. "query": sug,
  555. "timestamp": datetime.now().isoformat()
  556. }
  557. # 保存格式化后的搜索结果到文件
  558. safe_sug_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in sug)
  559. search_result_file = os.path.join(candidate_dir, f"sug_{sug_index+1}_{safe_sug_name[:30]}.json")
  560. with open(search_result_file, 'w', encoding='utf-8') as f:
  561. json.dump(formatted_search_result, f, ensure_ascii=False, indent=2)
  562. print(f" 搜索结果已保存: {os.path.basename(search_result_file)}")
  563. # 提取搜索结果的标题和描述
  564. # 正确的数据路径: result.data.data[]
  565. notes = result_data.get("data", {}).get("data", [])
  566. if notes:
  567. print(f" 开始评估 {len(notes)} 个帖子...")
  568. # 对每个帖子进行独立评估
  569. note_evaluations = []
  570. for note_idx, note in enumerate(notes, 1): # 评估所有帖子
  571. note_card = note.get("note_card", {})
  572. title = note_card.get("display_title", "")
  573. desc = note_card.get("desc", "")
  574. note_id = note.get("id", "")
  575. # 构造评估输入
  576. eval_input = f"""
  577. <原始问题>
  578. {original_question}
  579. </原始问题>
  580. <帖子信息>
  581. 标题: {title}
  582. 描述: {desc}
  583. </帖子信息>
  584. 请评估这个帖子能否满足用户需求。
  585. """
  586. # 调用评估Agent
  587. eval_result_run = await Runner.run(note_evaluator, eval_input)
  588. note_eval: NoteEvaluation = eval_result_run.final_output
  589. note_evaluation_record = {
  590. "note_index": note_idx,
  591. "note_id": note_id,
  592. "title": title,
  593. "desc": desc, # 保存完整描述
  594. "evaluation": {
  595. "title_relevance": note_eval.title_relevance,
  596. "content_expectation": note_eval.content_expectation,
  597. "need_satisfaction": note_eval.need_satisfaction,
  598. "confidence_score": note_eval.confidence_score,
  599. "reason": note_eval.reason
  600. }
  601. }
  602. note_evaluations.append(note_evaluation_record)
  603. # 简单打印进度
  604. if note_idx % 3 == 0 or note_idx == len(notes):
  605. print(f" 已评估 {note_idx}/{len(notes)} 个帖子")
  606. # 统计满足需求的帖子数量
  607. satisfied_count = sum(1 for ne in note_evaluations if ne["evaluation"]["need_satisfaction"])
  608. avg_confidence = sum(ne["evaluation"]["confidence_score"] for ne in note_evaluations) / len(note_evaluations) if note_evaluations else 0
  609. eval_result["search_verification"] = {
  610. "total_notes": len(notes),
  611. "evaluated_notes": len(note_evaluations),
  612. "satisfied_count": satisfied_count,
  613. "average_confidence": round(avg_confidence, 2),
  614. "note_evaluations": note_evaluations,
  615. "search_result_file": search_result_file
  616. }
  617. print(f" 评估完成: {satisfied_count}/{len(note_evaluations)} 个帖子满足需求, "
  618. f"平均置信度={avg_confidence:.2f}")
  619. else:
  620. eval_result["search_verification"] = {
  621. "total_notes": 0,
  622. "evaluated_notes": 0,
  623. "satisfied_count": 0,
  624. "average_confidence": 0.0,
  625. "note_evaluations": [],
  626. "search_result_file": search_result_file,
  627. "reason": "搜索无结果"
  628. }
  629. print(f" 搜索无结果")
  630. except Exception as e:
  631. print(f" 搜索验证出错: {e}")
  632. eval_result["search_verification"] = {
  633. "error": str(e)
  634. }
  635. return eval_result
  636. evaluations = await asyncio.gather(*[eval_single_sug(s, i) for i, s in enumerate(suggestions)])
  637. return {
  638. "candidate": candidate,
  639. "suggestions": suggestions,
  640. "evaluations": evaluations
  641. }
  642. results = await asyncio.gather(*[evaluate_single_candidate(c, i) for i, c in enumerate(candidates)])
  643. # 生成搜索结果汇总文件
  644. summary_data = {
  645. "original_question": original_question,
  646. "timestamp": datetime.now().isoformat(),
  647. "total_candidates": len(candidates),
  648. "candidates": []
  649. }
  650. for i, result in enumerate(results):
  651. candidate_summary = {
  652. "index": i + 1,
  653. "candidate": result["candidate"],
  654. "suggestions_count": len(result["suggestions"]),
  655. "verified_queries": []
  656. }
  657. for eval_item in result.get("evaluations", []):
  658. if "search_verification" in eval_item and "search_result_file" in eval_item["search_verification"]:
  659. sv = eval_item["search_verification"]
  660. candidate_summary["verified_queries"].append({
  661. "query": eval_item["query"],
  662. "intent_match": eval_item["intent_match"],
  663. "relevance_score": eval_item["relevance_score"],
  664. "verification": {
  665. "total_notes": sv.get("total_notes", 0),
  666. "evaluated_notes": sv.get("evaluated_notes", 0),
  667. "satisfied_count": sv.get("satisfied_count", 0),
  668. "average_confidence": sv.get("average_confidence", 0.0)
  669. },
  670. "search_result_file": sv["search_result_file"]
  671. })
  672. summary_data["candidates"].append(candidate_summary)
  673. # 保存汇总文件
  674. summary_file = os.path.join(search_results_dir, "summary.json")
  675. with open(summary_file, 'w', encoding='utf-8') as f:
  676. json.dump(summary_data, f, ensure_ascii=False, indent=2)
  677. print(f"\n搜索结果汇总已保存: {summary_file}")
  678. context.evaluation_results = results
  679. # 构建详细的步骤记录数据
  680. step_data = {
  681. "candidate_count": len(candidates),
  682. "candidates": candidates,
  683. "total_evaluations": sum(len(r['evaluations']) for r in results),
  684. "verified_queries": sum(
  685. 1 for r in results
  686. for e in r.get('evaluations', [])
  687. if 'search_verification' in e
  688. ),
  689. "search_results_dir": search_results_dir,
  690. "summary_file": summary_file,
  691. "detailed_results": []
  692. }
  693. # 为每个候选记录详细信息
  694. for result in results:
  695. candidate_detail = {
  696. "candidate": result["candidate"],
  697. "suggestions": result["suggestions"],
  698. "evaluations": []
  699. }
  700. for eval_item in result.get("evaluations", []):
  701. eval_detail = {
  702. "query": eval_item["query"],
  703. "intent_match": eval_item["intent_match"],
  704. "relevance_score": eval_item["relevance_score"],
  705. "reason": eval_item["reason"]
  706. }
  707. # 如果有搜索验证,添加详细信息
  708. if "search_verification" in eval_item:
  709. verification = eval_item["search_verification"]
  710. eval_detail["search_verification"] = {
  711. "performed": True,
  712. "total_notes": verification.get("total_notes", 0),
  713. "evaluated_notes": verification.get("evaluated_notes", 0),
  714. "satisfied_count": verification.get("satisfied_count", 0),
  715. "average_confidence": verification.get("average_confidence", 0.0),
  716. "search_result_file": verification.get("search_result_file"),
  717. "has_error": "error" in verification
  718. }
  719. if "error" in verification:
  720. eval_detail["search_verification"]["error"] = verification["error"]
  721. # 保存每个帖子的评估详情
  722. if "note_evaluations" in verification:
  723. eval_detail["search_verification"]["note_evaluations"] = verification["note_evaluations"]
  724. else:
  725. eval_detail["search_verification"] = {
  726. "performed": False,
  727. "reason": "未达到搜索验证阈值(intent_match=False 或 relevance_score<0.7)"
  728. }
  729. candidate_detail["evaluations"].append(eval_detail)
  730. step_data["detailed_results"].append(candidate_detail)
  731. # 记录步骤
  732. add_step(context, "评估候选query", "candidate_evaluation", step_data)
  733. return results
  734. # ============================================================================
  735. # 新的独立步骤函数(方案A)
  736. # ============================================================================
async def step_evaluate_query_suggestions(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
    """
    Step 1: Evaluate the suggested queries for each candidate query.

    Inputs:
        - candidates: list of candidate queries
        - original_question: the original user question
        - context: run context (used for step logging)
    Output:
        - one evaluation result per candidate, each containing:
            - candidate: the candidate query
            - suggestions: suggested queries fetched for it (may be empty)
            - evaluations: intent match + relevance score for each suggestion
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 评估 {len(candidates)} 个候选query的推荐词")
    print(f"{'='*60}")
    xiaohongshu_api = XiaohongshuSearchRecommendations()

    # Fetch and evaluate the suggestions for a single candidate query.
    async def evaluate_single_candidate(candidate: str):
        print(f"\n评估候选:{candidate}")
        # 1. Fetch suggested queries for this candidate
        suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
        print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
        if not suggestions:
            # No suggestions: return an empty but well-formed record.
            return {
                "candidate": candidate,
                "suggestions": [],
                "evaluations": []
            }
        # 2. Evaluate each suggestion (intent match + relevance score only;
        #    search verification happens in later steps)
        async def eval_single_sug(sug: str):
            eval_input = f"""
<原始问题>
{original_question}
</原始问题>
<待评估的推荐query>
{sug}
</待评估的推荐query>
请评估该推荐query:
1. intent_match: 意图是否匹配(true/false)
2. relevance_score: 相关性分数(0-1)
3. reason: 详细的评估理由
"""
            result = await Runner.run(evaluator, eval_input)
            evaluation: RelevanceEvaluation = result.final_output
            return {
                "query": sug,
                "intent_match": evaluation.intent_match,
                "relevance_score": evaluation.relevance_score,
                "reason": evaluation.reason
            }
        # All suggestions of one candidate are evaluated concurrently.
        evaluations = await asyncio.gather(*[eval_single_sug(s) for s in suggestions])
        return {
            "candidate": candidate,
            "suggestions": suggestions,
            "evaluations": evaluations
        }
    # All candidates are processed concurrently as well.
    results = await asyncio.gather(*[evaluate_single_candidate(c) for c in candidates])
    # Record the step. NOTE(review): the 0.7 threshold here is hard-coded and
    # duplicates step_filter_qualified_queries' default — keep them in sync.
    add_step(context, "评估候选query的推荐词", "query_suggestion_evaluation", {
        "candidate_count": len(candidates),
        "candidates": candidates,
        "results": results,
        "total_evaluations": sum(len(r['evaluations']) for r in results),
        "qualified_count": sum(
            1 for r in results
            for e in r['evaluations']
            if e['intent_match'] and e['relevance_score'] >= 0.7
        )
    })
    return results
  808. def step_filter_qualified_queries(evaluation_results: list[dict], context: RunContext, min_relevance_score: float = 0.7) -> list[dict]:
  809. """
  810. 步骤1.5: 筛选合格的推荐词
  811. 输入:
  812. - evaluation_results: 步骤1的评估结果
  813. 输出:
  814. - 合格的query列表,每个包含:
  815. - query: 推荐词
  816. - from_candidate: 来源候选
  817. - intent_match: 意图匹配
  818. - relevance_score: 相关性分数
  819. - reason: 评估理由
  820. """
  821. step_num = len(context.steps) + 1
  822. print(f"\n{'='*60}")
  823. print(f"[步骤 {step_num}] 筛选合格的推荐词")
  824. print(f"{'='*60}")
  825. qualified_queries = []
  826. all_queries = [] # 保存所有查询,包括不合格的
  827. for result in evaluation_results:
  828. candidate = result["candidate"]
  829. for eval_item in result.get("evaluations", []):
  830. query_data = {
  831. "query": eval_item["query"],
  832. "from_candidate": candidate,
  833. "intent_match": eval_item["intent_match"],
  834. "relevance_score": eval_item["relevance_score"],
  835. "reason": eval_item["reason"]
  836. }
  837. # 判断是否合格
  838. is_qualified = (eval_item['intent_match'] is True
  839. and eval_item['relevance_score'] >= min_relevance_score)
  840. query_data["is_qualified"] = is_qualified
  841. all_queries.append(query_data)
  842. if is_qualified:
  843. qualified_queries.append(query_data)
  844. # 按相关性分数降序排列
  845. qualified_queries.sort(key=lambda x: x['relevance_score'], reverse=True)
  846. all_queries.sort(key=lambda x: x['relevance_score'], reverse=True)
  847. print(f"\n找到 {len(qualified_queries)} 个合格的推荐词 (共评估 {len(all_queries)} 个)")
  848. if qualified_queries:
  849. print(f"相关性分数范围: {qualified_queries[0]['relevance_score']:.2f} ~ {qualified_queries[-1]['relevance_score']:.2f}")
  850. print("\n合格的推荐词:")
  851. for idx, q in enumerate(qualified_queries[:5], 1):
  852. print(f" {idx}. {q['query']} (分数: {q['relevance_score']:.2f})")
  853. if len(qualified_queries) > 5:
  854. print(f" ... 还有 {len(qualified_queries) - 5} 个")
  855. # 记录步骤 - 保存所有查询数据
  856. add_step(context, "筛选合格的推荐词", "filter_qualified_queries", {
  857. "input_evaluation_count": len(all_queries),
  858. "min_relevance_score": min_relevance_score,
  859. "qualified_count": len(qualified_queries),
  860. "qualified_queries": qualified_queries,
  861. "all_queries": all_queries # 新增:保存所有查询数据
  862. })
  863. return qualified_queries
async def step_search_qualified_queries(qualified_queries: list[dict], context: RunContext) -> dict:
    """
    Step 2: Run a Xiaohongshu search for each qualified suggested query.

    Inputs:
        - qualified_queries: queries filtered in step 1.5; each dict carries:
            - query: the suggested query text
            - from_candidate: the candidate query it came from
            - intent_match, relevance_score, reason: evaluation metadata
    Output:
        - a dict with:
            - searches: one result dict per query (notes, counts, file paths)
            - search_results_dir: directory where raw results were saved
              (None when there was nothing to search)
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 搜索 {len(qualified_queries)} 个合格的推荐词")
    print(f"{'='*60}")
    if not qualified_queries:
        # Nothing to search: still record the step so the log stays complete.
        add_step(context, "搜索合格的推荐词", "search_qualified_queries", {
            "qualified_count": 0,
            "searches": []
        })
        return {"searches": [], "search_results_dir": None}
    # Directory for persisting raw search results
    search_results_dir = os.path.join(context.log_dir, "search_results")
    os.makedirs(search_results_dir, exist_ok=True)
    xiaohongshu_search = XiaohongshuSearch()

    # Search a single query; always returns a result dict — failures are
    # marked with an "error" key instead of raising.
    async def search_single_query(query_info: dict, query_index: int):
        query = query_info['query']
        print(f"\n搜索 [{query_index+1}/{len(qualified_queries)}]: {query}")
        try:
            # Run the search
            search_result = xiaohongshu_search.search(keyword=query)
            # The "result" field may arrive as a JSON string; normalize to a dict.
            result_str = search_result.get("result", "{}")
            if isinstance(result_str, str):
                result_data = json.loads(result_str)
            else:
                result_data = result_str
            # Shape the payload that gets persisted to disk
            formatted_search_result = {
                "success": search_result.get("success"),
                "result": result_data,
                "tool_name": search_result.get("tool_name"),
                "call_type": search_result.get("call_type"),
                "query": query,
                "timestamp": datetime.now().isoformat()
            }
            # Save under a per-query directory; the query text is sanitized
            # and truncated so it is safe to use in a filesystem path.
            safe_query_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in query)
            query_dir = os.path.join(search_results_dir, f"query_{query_index+1}_{safe_query_name[:50]}")
            os.makedirs(query_dir, exist_ok=True)
            search_result_file = os.path.join(query_dir, "search_result.json")
            with open(search_result_file, 'w', encoding='utf-8') as f:
                json.dump(formatted_search_result, f, ensure_ascii=False, indent=2)
            # Extract the list of notes (posts) from the nested response
            notes = result_data.get("data", {}).get("data", [])
            print(f" → 搜索成功,获得 {len(notes)} 个帖子")
            # Compact note summaries destined for steps.json
            notes_summary = [process_note_data(note) for note in notes]
            return {
                "query": query,
                "from_candidate": query_info['from_candidate'],
                "intent_match": query_info['intent_match'],
                "relevance_score": query_info['relevance_score'],
                "reason": query_info['reason'],
                "search_result_file": search_result_file,
                "note_count": len(notes),
                "notes": notes,  # full notes kept in memory for the evaluation step
                "notes_summary": notes_summary  # compact form saved to steps.json
            }
        except Exception as e:
            # Best-effort: a failed search is recorded, not fatal.
            print(f" → 搜索失败: {e}")
            return {
                "query": query,
                "from_candidate": query_info['from_candidate'],
                "intent_match": query_info['intent_match'],
                "relevance_score": query_info['relevance_score'],
                "reason": query_info['reason'],
                "error": str(e),
                "note_count": 0,
                "notes": []
            }
    search_results = await asyncio.gather(*[search_single_query(q, i) for i, q in enumerate(qualified_queries)])
    # Record the step (summaries only; full notes stay in the return value)
    add_step(context, "搜索合格的推荐词", "search_qualified_queries", {
        "qualified_count": len(qualified_queries),
        "search_results": [
            {
                "query": sr['query'],
                "from_candidate": sr['from_candidate'],
                "note_count": sr['note_count'],
                "search_result_file": sr.get('search_result_file'),
                "has_error": 'error' in sr,
                "notes_summary": sr.get('notes_summary', [])
            }
            for sr in search_results
        ],
        "search_results_dir": search_results_dir
    })
    return {
        "searches": search_results,
        "search_results_dir": search_results_dir
    }
  969. async def step_evaluate_search_notes(search_data: dict, original_question: str, context: RunContext) -> dict:
  970. """
  971. 步骤3: 评估搜索到的帖子
  972. 输入:
  973. - search_data: 步骤2的搜索结果,包含:
  974. - searches: 搜索结果列表
  975. - search_results_dir: 结果目录
  976. 输出:
  977. - 帖子评估结果字典,包含:
  978. - note_evaluations: 每个query的帖子评估列表
  979. """
  980. step_num = len(context.steps) + 1
  981. print(f"\n{'='*60}")
  982. print(f"[步骤 {step_num}] 评估搜索到的帖子")
  983. print(f"{'='*60}")
  984. search_results = search_data['searches']
  985. if not search_results:
  986. add_step(context, "评估搜索到的帖子", "evaluate_search_notes", {
  987. "query_count": 0,
  988. "total_notes": 0,
  989. "evaluated_notes": 0,
  990. "note_evaluations": []
  991. })
  992. return {"note_evaluations": []}
  993. # 对每个query的帖子进行评估
  994. async def evaluate_query_notes(search_result: dict, query_index: int):
  995. query = search_result['query']
  996. notes = search_result.get('notes', [])
  997. if not notes or 'error' in search_result:
  998. return {
  999. "query": query,
  1000. "from_candidate": search_result['from_candidate'],
  1001. "note_count": 0,
  1002. "evaluated_notes": [],
  1003. "satisfied_count": 0,
  1004. "average_confidence": 0.0
  1005. }
  1006. print(f"\n评估query [{query_index+1}]: {query} ({len(notes)} 个帖子)")
  1007. # 评估每个帖子
  1008. note_evaluations = []
  1009. for note_idx, note in enumerate(notes, 1):
  1010. # 使用标准化函数处理帖子数据
  1011. note_data = process_note_data(note)
  1012. title = note_data["title"]
  1013. desc = note_data["desc"]
  1014. # 调用评估Agent
  1015. eval_input = f"""
  1016. <原始问题>
  1017. {original_question}
  1018. </原始问题>
  1019. <帖子信息>
  1020. 标题: {title}
  1021. 描述: {desc}
  1022. </帖子信息>
  1023. 请评估这个帖子能否满足用户需求。
  1024. """
  1025. eval_result_run = await Runner.run(note_evaluator, eval_input)
  1026. note_eval: NoteEvaluation = eval_result_run.final_output
  1027. # 合并标准化的帖子数据和评估结果
  1028. note_evaluations.append({
  1029. **note_data, # 包含所有标准化字段
  1030. "note_index": note_idx,
  1031. "evaluation": {
  1032. "title_relevance": note_eval.title_relevance,
  1033. "content_expectation": note_eval.content_expectation,
  1034. "need_satisfaction": note_eval.need_satisfaction,
  1035. "confidence_score": note_eval.confidence_score,
  1036. "reason": note_eval.reason
  1037. }
  1038. })
  1039. if note_idx % 3 == 0 or note_idx == len(notes):
  1040. print(f" 已评估 {note_idx}/{len(notes)} 个帖子")
  1041. # 统计
  1042. satisfied_count = sum(1 for ne in note_evaluations if ne["evaluation"]["need_satisfaction"])
  1043. avg_confidence = sum(ne["evaluation"]["confidence_score"] for ne in note_evaluations) / len(note_evaluations) if note_evaluations else 0
  1044. print(f" → 完成:{satisfied_count}/{len(note_evaluations)} 个帖子满足需求")
  1045. return {
  1046. "query": query,
  1047. "from_candidate": search_result['from_candidate'],
  1048. "note_count": len(notes),
  1049. "evaluated_notes": note_evaluations,
  1050. "satisfied_count": satisfied_count,
  1051. "average_confidence": round(avg_confidence, 2)
  1052. }
  1053. # 并发评估所有query的帖子
  1054. all_evaluations = await asyncio.gather(*[evaluate_query_notes(sr, i) for i, sr in enumerate(search_results, 1)])
  1055. # 记录步骤
  1056. total_notes = sum(e['note_count'] for e in all_evaluations)
  1057. total_satisfied = sum(e['satisfied_count'] for e in all_evaluations)
  1058. add_step(context, "评估搜索到的帖子", "evaluate_search_notes", {
  1059. "query_count": len(search_results),
  1060. "total_notes": total_notes,
  1061. "total_satisfied": total_satisfied,
  1062. "note_evaluations": all_evaluations
  1063. })
  1064. return {"note_evaluations": all_evaluations}
  1065. def step_collect_satisfied_notes(note_evaluation_data: dict) -> list[dict]:
  1066. """
  1067. 步骤4: 汇总所有满足需求的帖子
  1068. 输入:
  1069. - note_evaluation_data: 步骤3的帖子评估结果
  1070. 输出:
  1071. - 所有满足需求的帖子列表,按置信度降序排列
  1072. """
  1073. print(f"\n{'='*60}")
  1074. print(f"汇总满足需求的帖子")
  1075. print(f"{'='*60}")
  1076. all_satisfied_notes = []
  1077. for query_eval in note_evaluation_data['note_evaluations']:
  1078. for note in query_eval['evaluated_notes']:
  1079. if note['evaluation']['need_satisfaction']:
  1080. all_satisfied_notes.append({
  1081. "query": query_eval['query'],
  1082. "from_candidate": query_eval['from_candidate'],
  1083. "note_id": note['note_id'],
  1084. "title": note['title'],
  1085. "desc": note['desc'],
  1086. # ⭐ 保留完整帖子信息
  1087. "image_list": note.get('image_list', []),
  1088. "cover_image": note.get('cover_image', {}),
  1089. "interact_info": note.get('interact_info', {}),
  1090. "user": note.get('user', {}),
  1091. "type": note.get('type', 'normal'),
  1092. "note_url": note.get('note_url', ''),
  1093. # 评估结果
  1094. "title_relevance": note['evaluation']['title_relevance'],
  1095. "content_expectation": note['evaluation']['content_expectation'],
  1096. "confidence_score": note['evaluation']['confidence_score'],
  1097. "reason": note['evaluation']['reason']
  1098. })
  1099. # 按置信度降序排列
  1100. all_satisfied_notes.sort(key=lambda x: x['confidence_score'], reverse=True)
  1101. print(f"\n共收集到 {len(all_satisfied_notes)} 个满足需求的帖子")
  1102. if all_satisfied_notes:
  1103. print(f"置信度范围: {all_satisfied_notes[0]['confidence_score']:.2f} ~ {all_satisfied_notes[-1]['confidence_score']:.2f}")
  1104. return all_satisfied_notes
async def step_generate_answer(satisfied_notes: list[dict], original_question: str, context: RunContext) -> dict:
    """
    Step 5: Generate an answer from the notes that satisfy the need.

    Inputs:
        - satisfied_notes: notes collected in step 4 (sorted by confidence)
        - original_question: the original user question
        - context: run context (used for step logging)
    Output:
        - dict describing the generated answer:
            - answer: answer body (Markdown)
            - cited_note_indices: 1-based indices of the cited notes
            - confidence: answer confidence
            - summary: short summary of the answer
            - cited_notes: full details of the cited notes
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 基于 {len(satisfied_notes)} 个帖子生成答案")
    print(f"{'='*60}")
    if not satisfied_notes:
        # No usable notes: return a fixed "no answer" payload and log the step.
        print("\n⚠️ 没有满足需求的帖子,无法生成答案")
        result = {
            "answer": "抱歉,未找到能够回答该问题的相关内容。",
            "cited_note_indices": [],
            "confidence": 0.0,
            "summary": "无可用信息",
            "cited_notes": []
        }
        add_step(context, "生成答案", "answer_generation", {
            "original_question": original_question,
            "input_notes_count": 0,
            "result": result
        })
        return result
    # Build the agent input: one numbered section per note
    notes_info = []
    for idx, note in enumerate(satisfied_notes, 1):
        notes_info.append(f"""
【帖子 {idx}】
标题: {note['title']}
描述: {note['desc']}
置信度: {note['confidence_score']:.2f}
""".strip())
    agent_input = f"""
<原始问题>
{original_question}
</原始问题>
<相关帖子>
{chr(10).join(notes_info)}
</相关帖子>
请基于以上帖子,为原始问题生成一个全面、准确的答案。
记得在答案中使用 [1], [2] 等标注引用的帖子序号。
""".strip()
    print(f"\n📝 调用答案生成Agent...")
    print(f" - 可用帖子: {len(satisfied_notes)} 个")
    print(f" - 平均置信度: {sum(n['confidence_score'] for n in satisfied_notes) / len(satisfied_notes):.2f}")
    # Run the answer-generation agent
    result_run = await Runner.run(answer_generator, agent_input)
    answer_result: AnswerGeneration = result_run.final_output
    # Collect details for each cited note; indices are 1-based, and
    # out-of-range indices from the agent are silently dropped.
    cited_notes = []
    for idx in answer_result.cited_note_indices:
        if 1 <= idx <= len(satisfied_notes):
            note = satisfied_notes[idx - 1]
            cited_notes.append({
                "index": idx,
                "note_id": note['note_id'],
                "title": note['title'],
                "desc": note['desc'],
                "confidence_score": note['confidence_score'],
                # full note payload for visualization
                "image_list": note.get('image_list', []),
                "cover_image": note.get('cover_image', {}),
                "interact_info": note.get('interact_info', {}),
                "user": note.get('user', {}),
                "note_url": note.get('note_url', ''),
                "type": note.get('type', 'normal'),
                # evaluation details
                "title_relevance": note.get('title_relevance', 0),
                "content_expectation": note.get('content_expectation', 0),
                "reason": note.get('reason', '')
            })
    result = {
        "answer": answer_result.answer,
        "cited_note_indices": answer_result.cited_note_indices,
        "confidence": answer_result.confidence,
        "summary": answer_result.summary,
        "cited_notes": cited_notes
    }
    # Short completion report
    print(f"\n✅ 答案生成完成")
    print(f" - 引用帖子数: {len(answer_result.cited_note_indices)} 个")
    print(f" - 答案置信度: {answer_result.confidence:.2f}")
    print(f" - 答案摘要: {answer_result.summary}")
    # Record the step (agent input truncated to keep the log small)
    add_step(context, "生成答案", "answer_generation", {
        "original_question": original_question,
        "input_notes_count": len(satisfied_notes),
        "result": result,
        "agent_input_preview": agent_input[:500] + "..." if len(agent_input) > 500 else agent_input
    })
    return result
  1207. def find_qualified_queries(evaluation_results: list[dict], min_relevance_score: float = 0.7) -> list[dict]:
  1208. """
  1209. 查找所有合格的query(旧函数,保留兼容性)
  1210. 筛选标准:
  1211. 1. intent_match = True(必须满足)
  1212. 2. relevance_score >= min_relevance_score
  1213. 返回:按 relevance_score 降序排列
  1214. """
  1215. all_qualified = []
  1216. for result in evaluation_results:
  1217. for eval_item in result.get("evaluations", []):
  1218. if (eval_item['intent_match'] is True
  1219. and eval_item['relevance_score'] >= min_relevance_score):
  1220. all_qualified.append({
  1221. "from_candidate": result["candidate"],
  1222. **eval_item
  1223. })
  1224. # 按relevance_score降序排列
  1225. return sorted(all_qualified, key=lambda x: x['relevance_score'], reverse=True)
  1226. # ============================================================================
  1227. # 主流程
  1228. # ============================================================================
async def progressive_exploration(context: RunContext, max_levels: int = 4) -> dict:
    """
    Progressive exploration pipeline built from independent steps.

    Flow:
        1. Extract keywords and explore level by level (reuses the old flow)
        2. Step 1:   evaluate suggested queries for each candidate
        3. Step 1.5: filter the qualified suggestions
        4. Step 2:   search the qualified suggestions
        5. Step 3:   evaluate the notes found by the searches
        6. Step 4:   collect the notes that satisfy the need
        7. Step 5:   generate the final answer

    Args:
        context: run context
        max_levels: maximum number of exploration levels (default 4)

    Returns:
        {
            "success": True/False,
            "final_answer": {...},     # generated answer (None on failure)
            "satisfied_notes": [...],  # notes that satisfy the need
            "message": "..."
        }
    """
    # ========== Phase 1: progressive exploration (find candidate queries) ==========
    # 1.1 Extract keywords from the question
    keyword_result = await extract_keywords(context.q, context)
    context.keywords = keyword_result.keywords
    # 1.2 Explore level by level
    current_level = 1
    candidates_to_evaluate = []
    # Level 1: single keywords
    level_1_queries = context.keywords  # use every extracted keyword
    level_1_data = await explore_level(level_1_queries, current_level, context)
    analysis_1 = await analyze_level(level_1_data, context.exploration_levels, context.q, context)
    if analysis_1.should_evaluate_now:
        candidates_to_evaluate.extend(analysis_1.candidates_to_evaluate)
    # Level 2+: iterate until no combinations remain or max_levels is reached
    for level_num in range(2, max_levels + 1):
        # NOTE(review): the stored analysis appears to be a plain mapping that
        # is re-hydrated into a LevelAnalysis here — confirm analyze_level
        # always stores a dict, otherwise LevelAnalysis(**...) would fail.
        prev_analysis: LevelAnalysis = context.level_analyses[-1]["analysis"]
        prev_analysis = LevelAnalysis(**prev_analysis)
        if not prev_analysis.next_combinations:
            print(f"\nLevel {level_num-1} 分析后无需继续探索")
            break
        level_data = await explore_level(prev_analysis.next_combinations, level_num, context)
        analysis = await analyze_level(level_data, context.exploration_levels, context.q, context)
        if analysis.should_evaluate_now:
            candidates_to_evaluate.extend(analysis.candidates_to_evaluate)
    if not candidates_to_evaluate:
        # Exploration produced nothing to evaluate — bail out early.
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "渐进式探索未找到候选query"
        }
    print(f"\n{'='*60}")
    print(f"渐进式探索完成,找到 {len(candidates_to_evaluate)} 个候选query")
    print(f"{'='*60}")
    # ========== Phase 2: independent step pipeline ==========
    # Step 1: evaluate the suggestions of each candidate query
    evaluation_results = await step_evaluate_query_suggestions(
        candidates_to_evaluate,
        context.q,
        context
    )
    # Step 1.5: filter the qualified suggestions
    qualified_queries = step_filter_qualified_queries(
        evaluation_results,
        context,
        min_relevance_score=0.7
    )
    if not qualified_queries:
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "没有合格的推荐词"
        }
    # Step 2: search the qualified suggestions
    search_results = await step_search_qualified_queries(
        qualified_queries,
        context
    )
    if not search_results.get('searches'):
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "搜索失败"
        }
    # Step 3: evaluate the notes found by the searches
    note_evaluation_data = await step_evaluate_search_notes(
        search_results,
        context.q,
        context
    )
    # Step 4: collect the notes that satisfy the need
    satisfied_notes = step_collect_satisfied_notes(note_evaluation_data)
    if not satisfied_notes:
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "未找到满足需求的帖子"
        }
    # Step 5: generate the answer
    final_answer = await step_generate_answer(
        satisfied_notes,
        context.q,
        context
    )
    # ========== Final result ==========
    return {
        "success": True,
        "final_answer": final_answer,
        "satisfied_notes": satisfied_notes,
        "message": f"成功找到 {len(satisfied_notes)} 个满足需求的帖子,并生成答案"
    }
  1344. # ============================================================================
  1345. # 输出格式化
  1346. # ============================================================================
  1347. def format_output(optimization_result: dict, context: RunContext) -> str:
  1348. """
  1349. 格式化输出结果 - 用于独立步骤流程
  1350. 包含:
  1351. - 生成的答案
  1352. - 引用的帖子详情
  1353. - 满足需求的帖子统计
  1354. """
  1355. final_answer = optimization_result.get("final_answer")
  1356. satisfied_notes = optimization_result.get("satisfied_notes", [])
  1357. output = f"原始问题:{context.q}\n"
  1358. output += f"提取的关键词:{', '.join(context.keywords or [])}\n"
  1359. output += f"探索层数:{len(context.exploration_levels)}\n"
  1360. output += f"找到满足需求的帖子:{len(satisfied_notes)} 个\n"
  1361. output += "\n" + "="*60 + "\n"
  1362. if final_answer:
  1363. output += "【生成的答案】\n\n"
  1364. output += final_answer.get("answer", "")
  1365. output += "\n\n" + "="*60 + "\n"
  1366. output += f"答案置信度:{final_answer.get('confidence', 0):.2f}\n"
  1367. output += f"答案摘要:{final_answer.get('summary', '')}\n"
  1368. output += f"引用帖子数:{len(final_answer.get('cited_note_indices', []))} 个\n"
  1369. output += "\n" + "="*60 + "\n"
  1370. output += "【引用的帖子详情】\n\n"
  1371. for cited_note in final_answer.get("cited_notes", []):
  1372. output += f"[{cited_note['index']}] {cited_note['title']}\n"
  1373. output += f" 置信度: {cited_note['confidence_score']:.2f}\n"
  1374. output += f" 描述: {cited_note['desc']}\n"
  1375. output += f" note_id: {cited_note['note_id']}\n\n"
  1376. else:
  1377. output += "未能生成答案\n"
  1378. return output
  1379. # ============================================================================
  1380. # 主函数
  1381. # ============================================================================
async def main(input_dir: str, max_levels: int = 4, visualize: bool = False):
    """
    Entry point: run the independent-step pipeline (plan A).

    Args:
        input_dir: directory containing context.md and q.md
        max_levels: maximum number of exploration levels
        visualize: when True, render an HTML visualization after the run
    """
    current_time, log_url = set_trace()
    # Fixed file names inside the input directory
    input_context_file = os.path.join(input_dir, 'context.md')
    input_q_file = os.path.join(input_dir, 'q.md')
    q_context = read_file_as_string(input_context_file)
    q = read_file_as_string(input_q_file)
    q_with_context = f"""
<需求上下文>
{q_context}
</需求上下文>
<当前问题>
{q}
</当前问题>
""".strip()
    # Use this file's name as the version tag
    version = os.path.basename(__file__)
    version_name = os.path.splitext(version)[0]
    # Log directory: <input_dir>/output/<version>/<timestamp>
    log_dir = os.path.join(input_dir, "output", version_name, current_time)
    run_context = RunContext(
        version=version,
        input_files={
            "input_dir": input_dir,
            "context_file": input_context_file,
            "q_file": input_q_file,
        },
        q_with_context=q_with_context,
        q_context=q_context,
        q=q,
        log_dir=log_dir,
        log_url=log_url,
    )
    # Run the progressive-exploration pipeline
    optimization_result = await progressive_exploration(run_context, max_levels=max_levels)
    # Format and show the final output
    final_output = format_output(optimization_result, run_context)
    print(f"\n{'='*60}")
    print("最终结果")
    print(f"{'='*60}")
    print(final_output)
    # Keep the results on the context for serialization
    run_context.optimization_result = optimization_result
    run_context.final_output = final_output
    # Record the final-result step (new format)
    final_answer = optimization_result.get("final_answer")
    satisfied_notes = optimization_result.get("satisfied_notes", [])
    add_step(run_context, "生成最终结果", "final_result", {
        "success": optimization_result["success"],
        "message": optimization_result["message"],
        "satisfied_notes_count": len(satisfied_notes),
        "final_answer": final_answer,
        "satisfied_notes_summary": [
            {
                "note_id": note["note_id"],
                "title": note["title"],
                "confidence_score": note["confidence_score"]
            }
            for note in satisfied_notes  # summary of every satisfying note
        ] if satisfied_notes else [],
        "final_output": final_output
    })
    # Persist RunContext to log_dir (steps are written separately below)
    os.makedirs(run_context.log_dir, exist_ok=True)
    context_file_path = os.path.join(run_context.log_dir, "run_context.json")
    context_dict = run_context.model_dump()
    context_dict.pop('steps', None)  # drop steps to avoid duplicated data
    with open(context_file_path, "w", encoding="utf-8") as f:
        json.dump(context_dict, f, ensure_ascii=False, indent=2)
    print(f"\nRunContext saved to: {context_file_path}")
    # Persist the step-by-step log
    steps_file_path = os.path.join(run_context.log_dir, "steps.json")
    with open(steps_file_path, "w", encoding="utf-8") as f:
        json.dump(run_context.steps, f, ensure_ascii=False, indent=2)
    print(f"Steps log saved to: {steps_file_path}")
    # Optionally render the HTML visualization
    if visualize:
        import subprocess
        output_html = os.path.join(run_context.log_dir, "visualization.html")
        print(f"\n🎨 生成可视化HTML...")
        # NOTE(review): invokes "python" from PATH and a script whose name
        # contains dots ("sug_v6_1_2_3.visualize.py") — confirm both resolve
        # in the deployment environment.
        result = subprocess.run([
            "python", "sug_v6_1_2_3.visualize.py",
            steps_file_path,
            "-o", output_html
        ])
        if result.returncode == 0:
            print(f"✅ 可视化已生成: {output_html}")
        else:
            print(f"❌ 可视化生成失败")
if __name__ == "__main__":
    # CLI for the v6.1.2.3 pipeline (independent steps + answer generation)
    parser = argparse.ArgumentParser(description="搜索query优化工具 - v6.1.2.3 独立步骤+答案生成版")
    parser.add_argument(
        "--input-dir",
        type=str,
        default="input/简单扣图",
        help="输入目录路径,默认: input/简单扣图"
    )
    parser.add_argument(
        "--max-levels",
        type=int,
        default=10,
        help="最大探索层数,默认: 10"
    )
    parser.add_argument(
        "--visualize",
        action="store_true",
        default=True,
        help="运行完成后自动生成可视化HTML(默认开启)"
    )
    parser.add_argument(
        "--no-visualize",
        action="store_false",
        dest="visualize",
        help="关闭自动生成可视化"
    )
    parser.add_argument(
        "--visualize-only",
        action="store_true",
        help="只生成可视化,不运行搜索流程。自动查找input-dir下最新的输出目录"
    )
    args = parser.parse_args()
    # Visualization-only mode: find the newest run, render it, then exit.
    if args.visualize_only:
        import subprocess
        import glob
        # Version name mirrors the one used when writing output directories
        version_name = os.path.splitext(os.path.basename(__file__))[0]
        output_base = os.path.join(args.input_dir, "output", version_name)
        if not os.path.exists(output_base):
            print(f"❌ 找不到输出目录: {output_base}")
            sys.exit(1)
        # Collect run directories two levels deep (presumably <date>/<time> —
        # matches how main() builds log_dir from the trace timestamp)
        date_dirs = glob.glob(os.path.join(output_base, "*", "*"))
        if not date_dirs:
            print(f"❌ 在 {output_base} 中没有找到输出目录")
            sys.exit(1)
        # Most recently modified run wins
        latest_dir = max(date_dirs, key=os.path.getmtime)
        steps_json = os.path.join(latest_dir, "steps.json")
        if not os.path.exists(steps_json):
            print(f"❌ 找不到 steps.json: {steps_json}")
            sys.exit(1)
        output_html = os.path.join(latest_dir, "visualization.html")
        print(f"🎨 找到最新输出目录: {latest_dir}")
        print(f"🎨 生成可视化: {steps_json} -> {output_html}")
        result = subprocess.run([
            "python", "sug_v6_1_2_3.visualize.py",
            steps_json,
            "-o", output_html
        ])
        sys.exit(result.returncode)
    asyncio.run(main(args.input_dir, max_levels=args.max_levels, visualize=args.visualize))