sug_v6_1_2_3.py 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844
  1. import asyncio
  2. import json
  3. import os
  4. import argparse
  5. from datetime import datetime
  6. from agents import Agent, Runner
  7. from lib.my_trace import set_trace
  8. from typing import Literal
  9. from pydantic import BaseModel, Field
  10. from lib.utils import read_file_as_string
  11. from lib.client import get_model
  12. MODEL_NAME = "google/gemini-2.5-flash"
  13. from script.search_recommendations.xiaohongshu_search_recommendations import XiaohongshuSearchRecommendations
  14. from script.search.xiaohongshu_search import XiaohongshuSearch
class RunContext(BaseModel):
    """Run-wide mutable state shared by every pipeline stage.

    Stages append to ``steps`` (via ``add_step``) and to the exploration /
    evaluation lists as they execute; the final fields are filled at the end
    of the run.
    """
    version: str = Field(..., description="当前运行的脚本版本(文件名)")
    input_files: dict[str, str] = Field(..., description="输入文件路径映射")
    # The original question in three forms: with context, context only, bare question.
    q_with_context: str
    q_context: str
    q: str
    # Trace/log locations for this run (log_dir is used to save search results).
    log_url: str
    log_dir: str
    # Step-by-step execution log; one dict per pipeline step (see add_step()).
    steps: list[dict] = Field(default_factory=list, description="执行步骤的详细记录")
    # Exploration-phase records (kept for backward compatibility).
    keywords: list[str] | None = Field(default=None, description="提取的关键词")
    exploration_levels: list[dict] = Field(default_factory=list, description="每一层的探索结果")
    level_analyses: list[dict] = Field(default_factory=list, description="每一层的主Agent分析")
    # Final results.
    final_candidates: list[str] | None = Field(default=None, description="最终选出的候选query")
    evaluation_results: list[dict] | None = Field(default=None, description="候选query的评估结果")
    optimization_result: dict | None = Field(default=None, description="最终优化结果对象")
    final_output: str | None = Field(default=None, description="最终输出结果(格式化文本)")
  34. # ============================================================================
  35. # Agent 1: 关键词提取专家
  36. # ============================================================================
# Prompt for Agent 1 (keyword extractor): split a contextualised search
# question into the finest-grained, individually searchable key concepts.
# NOTE: this is a runtime prompt string consumed by the agent — do not edit
# casually; wording changes alter model behavior.
keyword_extraction_instructions = """
你是关键词提取专家。给定一个搜索问题(含上下文),提取出**最细粒度的关键概念**。
## 提取原则
1. **细粒度优先**:拆分成最小的有意义单元
- 不要保留完整的长句
- 拆分成独立的、有搜索意义的词或短语
2. **保留核心维度**:
- 地域/对象
- 时间
- 行为/意图:获取、教程、推荐、如何等
- 主题/领域
- 质量/属性
3. **去掉无意义的虚词**:的、吗、呢等
4. **保留领域专有词**:不要过度拆分专业术语
- 如果是常见的组合词,保持完整
## 输出要求
输出关键词列表,按重要性排序(最核心的在前)。
""".strip()
class KeywordList(BaseModel):
    """Structured output of the keyword-extraction agent (Agent 1)."""
    keywords: list[str] = Field(..., description="提取的关键词,按重要性排序")
    reasoning: str = Field(..., description="提取理由")
# Agent 1: extracts fine-grained keywords from the original question;
# returns a KeywordList.
keyword_extractor = Agent[None](
    name="关键词提取专家",
    instructions=keyword_extraction_instructions,
    model=get_model(MODEL_NAME),
    output_type=KeywordList,
)
  65. # ============================================================================
  66. # Agent 2: 层级探索分析专家
  67. # ============================================================================
# Prompt for Agent 2 (level-exploration analyst): given all suggestion
# results explored so far, decide whether to evaluate candidates now or
# which combinations to explore in the next level.
# NOTE: runtime prompt string — wording changes alter model behavior.
level_analysis_instructions = """
你是搜索空间探索分析专家。基于当前层级的探索结果,决定下一步行动。
## 你的任务
分析当前已探索的词汇空间,判断:
1. **发现了什么有价值的信号?**
2. **是否已经可以评估候选了?**
3. **如果还不够,下一层应该探索什么组合?**
## 分析维度
### 1. 信号识别(最重要)
看推荐词里**出现了什么主题**:
**关键问题:**
- 哪些推荐词**最接近原始需求**?
- 哪些推荐词**揭示了有价值的方向**(即使不完全匹配)?
- 哪些推荐词可以作为**下一层探索的桥梁**?
- 系统对哪些概念理解得好?哪些理解偏了?
### 2. 组合策略
基于发现的信号,设计下一层探索:
**组合类型:**
a) **关键词直接组合**
- 两个关键词组合成新query
b) **利用推荐词作为桥梁**(重要!)
- 发现某个推荐词很有价值 → 直接探索这个推荐词
- 或在推荐词基础上加其他关键词
c) **跨层级组合**
- 结合多层发现的有价值推荐词
- 组合成更复杂的query
### 3. 停止条件
**何时可以评估候选?**
满足以下之一:
- 推荐词中出现了**明确包含原始需求多个核心要素的query**
- 已经探索到**足够复杂的组合**(3-4个关键词),且推荐词相关
- 探索了**3-4层**,信息已经足够丰富
**何时继续探索?**
- 当前推荐词太泛,没有接近原始需求
- 发现了有价值的信号,但需要进一步组合验证
- 层数还少(< 3层)
## 输出要求
### 1. key_findings
总结当前层发现的关键信息,包括:
- 哪些推荐词最有价值?
- 系统对哪些概念理解得好/不好?
- 发现了什么意外的方向?
### 2. promising_signals
列出最有价值的推荐词(来自任何已探索的query),每个说明为什么有价值
### 3. should_evaluate_now
是否已经可以开始评估候选了?true/false
### 4. candidates_to_evaluate
如果should_evaluate_now=true,列出应该评估的候选query
- 可以是推荐词
- 可以是自己构造的组合
### 5. next_combinations
如果should_evaluate_now=false,列出下一层应该探索的query组合
### 6. reasoning
详细的推理过程
## 重要原则
1. **不要过早评估**:至少探索2层,除非第一层就发现了完美匹配
2. **充分利用推荐词**:推荐词是系统给的提示,要善用
3. **保持探索方向的多样性**:不要只盯着一个方向
4. **识别死胡同**:如果某个方向的推荐词一直不相关,果断放弃
""".strip()
class PromisingSignal(BaseModel):
    """One promising suggestion surfaced during exploration (item of Agent 2 output)."""
    query: str = Field(..., description="推荐词")
    from_level: int = Field(..., description="来自哪一层")
    reason: str = Field(..., description="为什么有价值")
class LevelAnalysis(BaseModel):
    """Structured output of the level-analysis agent (Agent 2).

    Exactly one of ``candidates_to_evaluate`` / ``next_combinations`` is
    meaningful, selected by ``should_evaluate_now``.
    """
    key_findings: str = Field(..., description="当前层的关键发现")
    promising_signals: list[PromisingSignal] = Field(..., description="有价值的推荐词信号")
    should_evaluate_now: bool = Field(..., description="是否应该开始评估候选")
    candidates_to_evaluate: list[str] = Field(default_factory=list, description="如果should_evaluate_now=true,要评估的候选query列表")
    next_combinations: list[str] = Field(default_factory=list, description="如果should_evaluate_now=false,下一层要探索的query组合")
    reasoning: str = Field(..., description="详细的推理过程")
# Agent 2: analyses the explored suggestion space per level; returns a
# LevelAnalysis deciding "evaluate now" vs. "explore further".
level_analyzer = Agent[None](
    name="层级探索分析专家",
    instructions=level_analysis_instructions,
    model=get_model(MODEL_NAME),
    output_type=LevelAnalysis,
)
  147. # ============================================================================
  148. # Agent 3: 评估专家(简化版:意图匹配 + 相关性评分)
  149. # ============================================================================
# Prompt for Agent 3 (query evaluator, simplified): scores a suggested query
# against the original question on two axes — boolean intent match and a
# 0-1 relevance score.
# NOTE: runtime prompt string — wording changes alter model behavior.
eval_instructions = """
你是搜索query评估专家。给定原始问题和推荐query,评估两个维度。
## 评估目标
用这个推荐query搜索,能否找到满足原始需求的内容?
## 两层评分
### 1. intent_match(意图匹配)= true/false
推荐query的**使用意图**是否与原问题一致?
**核心问题:用户搜索这个推荐词,想做什么?**
**判断标准:**
- 原问题意图:找方法?找教程?找资源/素材?找工具?看作品?
- 推荐词意图:如果用户搜索这个词,他的目的是什么?
**示例:**
- 原问题意图="找素材"
- ✅ true: "素材下载"、"素材网站"、"免费素材"(都是获取素材)
- ❌ false: "素材制作教程"、"如何制作素材"(意图变成学习了)
- 原问题意图="学教程"
- ✅ true: "教程视频"、"教学步骤"、"入门指南"
- ❌ false: "成品展示"、"作品欣赏"(意图变成看作品了)
**评分:**
- true = 意图一致,搜索推荐词能达到原问题的目的
- false = 意图改变,搜索推荐词无法达到原问题的目的
### 2. relevance_score(相关性)= 0-1 连续分数
推荐query在**主题、要素、属性**上与原问题的相关程度?
**评估维度:**
- 主题相关:核心主题是否匹配?(如:摄影、旅游、美食)
- 要素覆盖:关键要素保留了多少?(如:地域、时间、对象、工具)
- 属性匹配:质量、风格、特色等属性是否保留?
**评分参考:**
- 0.9-1.0 = 几乎完美匹配,所有核心要素都保留
- 0.7-0.8 = 高度相关,核心要素保留,少数次要要素缺失
- 0.5-0.6 = 中度相关,主题匹配但多个要素缺失
- 0.3-0.4 = 低度相关,只有部分主题相关
- 0-0.2 = 基本不相关
## 评估策略
1. **先判断 intent_match**:意图不匹配直接 false,无论相关性多高
2. **再评估 relevance_score**:在意图匹配的前提下,计算相关性
## 输出要求
- intent_match: true/false
- relevance_score: 0-1 的浮点数
- reason: 详细的评估理由,需要说明:
- 原问题的意图是什么
- 推荐词的意图是什么
- 为什么判断意图匹配/不匹配
- 相关性分数的依据(哪些要素保留/缺失)
""".strip()
class RelevanceEvaluation(BaseModel):
    """Structured output of the query evaluator (Agent 3): intent match + relevance."""
    intent_match: bool = Field(..., description="意图是否匹配")
    relevance_score: float = Field(..., description="相关性分数 0-1,分数越高越相关")
    reason: str = Field(..., description="评估理由,需说明意图判断和相关性依据")
# Agent 3: scores a suggested query against the original question;
# returns a RelevanceEvaluation.
evaluator = Agent[None](
    name="评估专家",
    instructions=eval_instructions,
    model=get_model(MODEL_NAME),
    output_type=RelevanceEvaluation,
)
  206. # ============================================================================
  207. # Agent 4: 单个帖子需求满足度评估专家
  208. # ============================================================================
# Prompt for Agent 4 (per-note evaluator): judges whether a single search
# result (title + description) is likely to satisfy the original need.
# NOTE: runtime prompt string — wording changes alter model behavior.
note_evaluation_instructions = """
你是帖子需求满足度评估专家。给定原始问题和一个搜索到的帖子(标题+描述),判断这个帖子能否满足用户的需求。
## 你的任务
评估单个帖子的标题和描述,判断用户点开这个帖子后,能否找到满足原始需求的内容。
## 评估维度
### 1. 标题相关性(title_relevance)= 0-1 连续分数
**评估标准:**
- 标题是否与原始问题的主题相关?
- 标题是否包含原始问题的关键要素?
- 标题是否表明内容能解决用户的问题?
**评分参考:**
- 0.9-1.0 = 标题高度相关,明确表明能解决问题
- 0.7-0.8 = 标题相关,包含核心要素
- 0.5-0.6 = 标题部分相关,有关联但不明确
- 0.3-0.4 = 标题相关性较低
- 0-0.2 = 标题基本不相关
### 2. 内容预期(content_expectation)= 0-1 连续分数
**评估标准:**
- 从描述看,内容是否可能包含用户需要的信息?
- 描述是否展示了相关的要素或细节?
- 描述的方向是否与用户需求一致?
**评分参考:**
- 0.9-1.0 = 描述明确表明内容高度符合需求
- 0.7-0.8 = 描述显示内容可能符合需求
- 0.5-0.6 = 描述有一定相关性,但不确定
- 0.3-0.4 = 描述相关性较低
- 0-0.2 = 描述基本不相关
### 3. 需求满足度(need_satisfaction)= true/false
**核心问题:用户点开这个帖子后,能否找到他需要的内容?**
**判断标准:**
- 综合标题和描述,内容是否大概率能满足需求?
- 如果 title_relevance >= 0.7 且 content_expectation >= 0.6,一般判断为 true
- 否则判断为 false
### 4. 综合置信度(confidence_score)= 0-1 连续分数
**计算方式:**
- 可以是 title_relevance 和 content_expectation 的加权平均
- 标题权重通常更高(如 0.6 * title + 0.4 * content)
## 输出要求
- title_relevance: 0-1 的浮点数
- content_expectation: 0-1 的浮点数
- need_satisfaction: true/false
- confidence_score: 0-1 的浮点数
- reason: 详细的评估理由,需要说明:
- 标题与原问题的相关性分析
- 描述的内容预期分析
- 为什么判断能/不能满足需求
- 置信度分数的依据
## 重要原则
1. **独立评估**:只评估这一个帖子,不考虑其他帖子
2. **用户视角**:问"我会点开这个帖子吗?点开后能找到答案吗?"
3. **标题优先**:标题是用户决定是否点击的关键
4. **保守判断**:不确定时,倾向于给较低的分数
""".strip()
class NoteEvaluation(BaseModel):
    """Structured output of the per-note evaluator (Agent 4) for one search result."""
    title_relevance: float = Field(..., description="标题相关性 0-1")
    content_expectation: float = Field(..., description="内容预期 0-1")
    need_satisfaction: bool = Field(..., description="是否满足需求")
    confidence_score: float = Field(..., description="综合置信度 0-1")
    reason: str = Field(..., description="详细的评估理由")
# Agent 4: judges a single note (title + description) against the original
# question; returns a NoteEvaluation.
note_evaluator = Agent[None](
    name="帖子需求满足度评估专家",
    instructions=note_evaluation_instructions,
    model=get_model(MODEL_NAME),
    output_type=NoteEvaluation,
)
  275. # ============================================================================
  276. # Agent 5: 答案生成专家
  277. # ============================================================================
# Prompt for Agent 5 (answer generator): synthesises a cited Markdown answer
# from the set of notes that satisfied the need.
# NOTE: runtime prompt string — wording changes alter model behavior.
answer_generation_instructions = """
你是答案生成专家。基于一组满足需求的帖子,为原始问题生成一个全面、准确、有价值的答案。
## 你的任务
根据用户的原始问题和一组相关帖子(包含标题、描述、置信度评分),生成一个高质量的答案。
## 输入信息
1. **原始问题**:用户提出的具体问题
2. **相关帖子列表**:每个帖子包含
- 序号(索引)
- 标题
- 描述
- 置信度分数
## 答案要求
### 1. 内容要求
- **直接回答问题**:开门见山,第一段就给出核心答案
- **结构清晰**:使用标题、列表、分段等组织内容
- **综合多个来源**:整合多个帖子的信息,不要只依赖一个
- **信息准确**:基于帖子内容,不要编造信息
- **实用性**:提供可操作的建议或具体的信息
### 2. 引用规范
- **必须标注来源**:每个关键信息都要标注帖子索引
- **引用格式**:使用 `[1]`、`[2]` 等标注帖子序号
- **多来源引用**:如果多个帖子支持同一观点,使用 `[1,2,3]`
- **引用位置**:在相关句子或段落的末尾标注
### 3. 置信度使用
- **优先高置信度**:优先引用置信度高的帖子
- **交叉验证**:如果多个帖子提到相同信息,可以提高可信度
- **标注不确定性**:如果信息来自低置信度帖子,适当标注
### 4. 答案结构建议
```
【核心答案】
直接回答用户的问题,给出最核心的信息。[引用]
【详细说明】
1. 第一个方面/要点 [引用]
- 具体内容
- 相关细节
2. 第二个方面/要点 [引用]
- 具体内容
- 相关细节
【补充建议/注意事项】(可选)
其他有价值的信息或提醒。[引用]
【参考帖子】
列出所有引用的帖子编号和标题。
```
## 输出格式
{
"answer": "生成的答案内容(Markdown格式)",
"cited_note_indices": [1, 2, 3], # 引用的帖子序号列表
"confidence": 0.85, # 答案的整体置信度 (0-1)
"summary": "一句话总结答案的核心内容"
}
## 重要原则
1. **忠于原文**:不要添加帖子中没有的信息
2. **引用透明**:让用户知道每个信息来自哪个帖子
3. **综合性**:尽可能整合多个帖子的信息
4. **可读性**:使用清晰的Markdown格式
5. **质量优先**:如果帖子质量不够,可以说明信息有限
""".strip()
class AnswerGeneration(BaseModel):
    """Structured output of the answer generator (Agent 5)."""
    answer: str = Field(..., description="生成的答案内容(Markdown格式)")
    cited_note_indices: list[int] = Field(..., description="引用的帖子序号列表")
    confidence: float = Field(..., description="答案的整体置信度 0-1")
    summary: str = Field(..., description="一句话总结答案的核心内容")
# Agent 5: writes the final cited Markdown answer from satisfied notes;
# returns an AnswerGeneration.
answer_generator = Agent[None](
    name="答案生成专家",
    instructions=answer_generation_instructions,
    model=get_model(MODEL_NAME),
    output_type=AnswerGeneration,
)
  347. # ============================================================================
  348. # 日志辅助函数
  349. # ============================================================================
  350. def add_step(context: RunContext, step_name: str, step_type: str, data: dict):
  351. """添加步骤记录"""
  352. step = {
  353. "step_number": len(context.steps) + 1,
  354. "step_name": step_name,
  355. "step_type": step_type,
  356. "timestamp": datetime.now().isoformat(),
  357. "data": data
  358. }
  359. context.steps.append(step)
  360. return step
  361. # ============================================================================
  362. # 核心函数
  363. # ============================================================================
  364. async def extract_keywords(q: str, context: RunContext) -> KeywordList:
  365. """提取关键词"""
  366. print("\n[步骤 1] 正在提取关键词...")
  367. result = await Runner.run(keyword_extractor, q)
  368. keyword_list: KeywordList = result.final_output
  369. print(f"提取的关键词:{keyword_list.keywords}")
  370. print(f"提取理由:{keyword_list.reasoning}")
  371. # 记录步骤
  372. add_step(context, "提取关键词", "keyword_extraction", {
  373. "input_question": q,
  374. "keywords": keyword_list.keywords,
  375. "reasoning": keyword_list.reasoning
  376. })
  377. return keyword_list
  378. async def explore_level(queries: list[str], level_num: int, context: RunContext) -> dict:
  379. """探索一个层级(并发获取所有query的推荐词)"""
  380. step_num = len(context.steps) + 1
  381. print(f"\n{'='*60}")
  382. print(f"[步骤 {step_num}] Level {level_num} 探索:{len(queries)} 个query")
  383. print(f"{'='*60}")
  384. xiaohongshu_api = XiaohongshuSearchRecommendations()
  385. # 并发获取所有推荐词
  386. async def get_single_sug(query: str):
  387. print(f" 探索: {query}")
  388. suggestions = xiaohongshu_api.get_recommendations(keyword=query)
  389. print(f" → {len(suggestions) if suggestions else 0} 个推荐词")
  390. return {
  391. "query": query,
  392. "suggestions": suggestions or []
  393. }
  394. results = await asyncio.gather(*[get_single_sug(q) for q in queries])
  395. level_data = {
  396. "level": level_num,
  397. "timestamp": datetime.now().isoformat(),
  398. "queries": results
  399. }
  400. context.exploration_levels.append(level_data)
  401. # 记录步骤
  402. add_step(context, f"Level {level_num} 探索", "level_exploration", {
  403. "level": level_num,
  404. "input_queries": queries,
  405. "query_count": len(queries),
  406. "results": results,
  407. "total_suggestions": sum(len(r['suggestions']) for r in results)
  408. })
  409. return level_data
  410. async def analyze_level(level_data: dict, all_levels: list[dict], original_question: str, context: RunContext) -> LevelAnalysis:
  411. """分析当前层级,决定下一步"""
  412. step_num = len(context.steps) + 1
  413. print(f"\n[步骤 {step_num}] 正在分析 Level {level_data['level']}...")
  414. # 构造输入
  415. analysis_input = f"""
  416. <原始问题>
  417. {original_question}
  418. </原始问题>
  419. <已探索的所有层级>
  420. {json.dumps(all_levels, ensure_ascii=False, indent=2)}
  421. </已探索的所有层级>
  422. <当前层级>
  423. Level {level_data['level']}
  424. {json.dumps(level_data['queries'], ensure_ascii=False, indent=2)}
  425. </当前层级>
  426. 请分析当前探索状态,决定下一步行动。
  427. """
  428. result = await Runner.run(level_analyzer, analysis_input)
  429. analysis: LevelAnalysis = result.final_output
  430. print(f"\n分析结果:")
  431. print(f" 关键发现:{analysis.key_findings}")
  432. print(f" 有价值的信号:{len(analysis.promising_signals)} 个")
  433. print(f" 是否评估:{analysis.should_evaluate_now}")
  434. if analysis.should_evaluate_now:
  435. print(f" 候选query:{analysis.candidates_to_evaluate}")
  436. else:
  437. print(f" 下一层探索:{analysis.next_combinations}")
  438. # 保存分析结果
  439. context.level_analyses.append({
  440. "level": level_data['level'],
  441. "timestamp": datetime.now().isoformat(),
  442. "analysis": analysis.model_dump()
  443. })
  444. # 记录步骤
  445. add_step(context, f"Level {level_data['level']} 分析", "level_analysis", {
  446. "level": level_data['level'],
  447. "key_findings": analysis.key_findings,
  448. "promising_signals_count": len(analysis.promising_signals),
  449. "promising_signals": [s.model_dump() for s in analysis.promising_signals],
  450. "should_evaluate_now": analysis.should_evaluate_now,
  451. "candidates_to_evaluate": analysis.candidates_to_evaluate if analysis.should_evaluate_now else [],
  452. "next_combinations": analysis.next_combinations if not analysis.should_evaluate_now else [],
  453. "reasoning": analysis.reasoning
  454. })
  455. return analysis
async def evaluate_candidates(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
    """Evaluate candidate queries, including live search verification.

    For every candidate: fetch its suggestion list, score each suggestion
    for intent match + relevance (Agent 3), and — for suggestions that pass
    the threshold (intent_match and relevance_score >= 0.7) — run a real
    Xiaohongshu search and grade each returned note (Agent 4). Raw search
    results are written under ``<log_dir>/search_results`` and summarised in
    ``summary.json``; a detailed step record is appended to the context.

    Returns:
        One dict per candidate: {"candidate", "suggestions", "evaluations"}.
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 评估 {len(candidates)} 个候选query")
    print(f"{'='*60}")
    xiaohongshu_api = XiaohongshuSearchRecommendations()
    xiaohongshu_search = XiaohongshuSearch()
    # Directory where raw search results for this run are saved.
    search_results_dir = os.path.join(context.log_dir, "search_results")
    os.makedirs(search_results_dir, exist_ok=True)
    async def evaluate_single_candidate(candidate: str, candidate_index: int):
        # Evaluate one candidate end-to-end (suggestions -> scoring -> search).
        print(f"\n评估候选:{candidate}")
        # Per-candidate subdirectory; sanitise the candidate so it is a valid
        # path component (anything but alphanumerics/space/_/- becomes '_').
        safe_candidate_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in candidate)
        candidate_dir = os.path.join(search_results_dir, f"candidate_{candidate_index+1}_{safe_candidate_name[:50]}")
        os.makedirs(candidate_dir, exist_ok=True)
        # 1. Fetch the suggestion list for this candidate.
        # NOTE(review): get_recommendations()/search() appear to be blocking
        # synchronous calls, so the asyncio.gather() fan-outs below likely do
        # not overlap the network I/O — confirm, and consider
        # asyncio.to_thread if so.
        suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
        print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
        if not suggestions:
            return {
                "candidate": candidate,
                "suggestions": [],
                "evaluations": []
            }
        # 2. Score every suggestion (intent match + relevance).
        async def eval_single_sug(sug: str, sug_index: int):
            # 2.1 Intent/relevance scoring via the evaluator agent.
            eval_input = f"""
<原始问题>
{original_question}
</原始问题>
<待评估的推荐query>
{sug}
</待评估的推荐query>
请评估该推荐query:
1. intent_match: 意图是否匹配(true/false)
2. relevance_score: 相关性分数(0-1)
3. reason: 详细的评估理由
"""
            result = await Runner.run(evaluator, eval_input)
            evaluation: RelevanceEvaluation = result.final_output
            eval_result = {
                "query": sug,
                "intent_match": evaluation.intent_match,
                "relevance_score": evaluation.relevance_score,
                "reason": evaluation.reason,
            }
            # 2.2 If intent matches and relevance is high enough, verify with
            # a real search.
            if evaluation.intent_match and evaluation.relevance_score >= 0.7:
                print(f" → 合格候选,进行实际搜索验证: {sug}")
                try:
                    search_result = xiaohongshu_search.search(keyword=sug)
                    # The "result" field arrives as a JSON string; parse it first.
                    result_str = search_result.get("result", "{}")
                    if isinstance(result_str, str):
                        result_data = json.loads(result_str)
                    else:
                        result_data = result_str
                    # Re-shape the search result with the parsed payload before saving.
                    formatted_search_result = {
                        "success": search_result.get("success"),
                        "result": result_data,  # parsed payload, not the raw JSON string
                        "tool_name": search_result.get("tool_name"),
                        "call_type": search_result.get("call_type"),
                        "query": sug,
                        "timestamp": datetime.now().isoformat()
                    }
                    # Persist the formatted search result for offline inspection.
                    safe_sug_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in sug)
                    search_result_file = os.path.join(candidate_dir, f"sug_{sug_index+1}_{safe_sug_name[:30]}.json")
                    with open(search_result_file, 'w', encoding='utf-8') as f:
                        json.dump(formatted_search_result, f, ensure_ascii=False, indent=2)
                    print(f" 搜索结果已保存: {os.path.basename(search_result_file)}")
                    # Extract note titles/descriptions.
                    # Expected payload path: result.data.data[]
                    notes = result_data.get("data", {}).get("data", [])
                    if notes:
                        print(f" 开始评估 {len(notes)} 个帖子...")
                        # Grade each note independently.
                        note_evaluations = []
                        for note_idx, note in enumerate(notes[:10], 1):  # cap at the first 10 notes
                            note_card = note.get("note_card", {})
                            title = note_card.get("display_title", "")
                            desc = note_card.get("desc", "")
                            note_id = note.get("id", "")
                            # Prompt for the per-note evaluator agent.
                            eval_input = f"""
<原始问题>
{original_question}
</原始问题>
<帖子信息>
标题: {title}
描述: {desc}
</帖子信息>
请评估这个帖子能否满足用户需求。
"""
                            # Run the per-note evaluation agent.
                            eval_result_run = await Runner.run(note_evaluator, eval_input)
                            note_eval: NoteEvaluation = eval_result_run.final_output
                            note_evaluation_record = {
                                "note_index": note_idx,
                                "note_id": note_id,
                                "title": title,
                                "desc": desc[:200],  # keep only the first 200 chars
                                "evaluation": {
                                    "title_relevance": note_eval.title_relevance,
                                    "content_expectation": note_eval.content_expectation,
                                    "need_satisfaction": note_eval.need_satisfaction,
                                    "confidence_score": note_eval.confidence_score,
                                    "reason": note_eval.reason
                                }
                            }
                            note_evaluations.append(note_evaluation_record)
                            # Lightweight progress output (every 3rd note and the last one).
                            if note_idx % 3 == 0 or note_idx == len(notes[:10]):
                                print(f" 已评估 {note_idx}/{len(notes[:10])} 个帖子")
                        # Aggregate: number of satisfying notes + mean confidence.
                        satisfied_count = sum(1 for ne in note_evaluations if ne["evaluation"]["need_satisfaction"])
                        avg_confidence = sum(ne["evaluation"]["confidence_score"] for ne in note_evaluations) / len(note_evaluations) if note_evaluations else 0
                        eval_result["search_verification"] = {
                            "total_notes": len(notes),
                            "evaluated_notes": len(note_evaluations),
                            "satisfied_count": satisfied_count,
                            "average_confidence": round(avg_confidence, 2),
                            "note_evaluations": note_evaluations,
                            "search_result_file": search_result_file
                        }
                        print(f" 评估完成: {satisfied_count}/{len(note_evaluations)} 个帖子满足需求, "
                              f"平均置信度={avg_confidence:.2f}")
                    else:
                        # Search returned no notes; record an empty verification.
                        eval_result["search_verification"] = {
                            "total_notes": 0,
                            "evaluated_notes": 0,
                            "satisfied_count": 0,
                            "average_confidence": 0.0,
                            "note_evaluations": [],
                            "search_result_file": search_result_file,
                            "reason": "搜索无结果"
                        }
                        print(f" 搜索无结果")
                except Exception as e:
                    # Best-effort verification: record the failure and keep going.
                    print(f" 搜索验证出错: {e}")
                    eval_result["search_verification"] = {
                        "error": str(e)
                    }
            return eval_result
        evaluations = await asyncio.gather(*[eval_single_sug(s, i) for i, s in enumerate(suggestions)])
        return {
            "candidate": candidate,
            "suggestions": suggestions,
            "evaluations": evaluations
        }
    results = await asyncio.gather(*[evaluate_single_candidate(c, i) for i, c in enumerate(candidates)])
    # Build the summary file describing every search-verified query.
    summary_data = {
        "original_question": original_question,
        "timestamp": datetime.now().isoformat(),
        "total_candidates": len(candidates),
        "candidates": []
    }
    for i, result in enumerate(results):
        candidate_summary = {
            "index": i + 1,
            "candidate": result["candidate"],
            "suggestions_count": len(result["suggestions"]),
            "verified_queries": []
        }
        for eval_item in result.get("evaluations", []):
            # Only entries whose verification produced a saved result file.
            if "search_verification" in eval_item and "search_result_file" in eval_item["search_verification"]:
                sv = eval_item["search_verification"]
                candidate_summary["verified_queries"].append({
                    "query": eval_item["query"],
                    "intent_match": eval_item["intent_match"],
                    "relevance_score": eval_item["relevance_score"],
                    "verification": {
                        "total_notes": sv.get("total_notes", 0),
                        "evaluated_notes": sv.get("evaluated_notes", 0),
                        "satisfied_count": sv.get("satisfied_count", 0),
                        "average_confidence": sv.get("average_confidence", 0.0)
                    },
                    "search_result_file": sv["search_result_file"]
                })
        summary_data["candidates"].append(candidate_summary)
    # Save the summary file.
    summary_file = os.path.join(search_results_dir, "summary.json")
    with open(summary_file, 'w', encoding='utf-8') as f:
        json.dump(summary_data, f, ensure_ascii=False, indent=2)
    print(f"\n搜索结果汇总已保存: {summary_file}")
    context.evaluation_results = results
    # Build the detailed step record for the run log.
    step_data = {
        "candidate_count": len(candidates),
        "candidates": candidates,
        "total_evaluations": sum(len(r['evaluations']) for r in results),
        "verified_queries": sum(
            1 for r in results
            for e in r.get('evaluations', [])
            if 'search_verification' in e
        ),
        "search_results_dir": search_results_dir,
        "summary_file": summary_file,
        "detailed_results": []
    }
    # Record full details for every candidate.
    for result in results:
        candidate_detail = {
            "candidate": result["candidate"],
            "suggestions": result["suggestions"],
            "evaluations": []
        }
        for eval_item in result.get("evaluations", []):
            eval_detail = {
                "query": eval_item["query"],
                "intent_match": eval_item["intent_match"],
                "relevance_score": eval_item["relevance_score"],
                "reason": eval_item["reason"]
            }
            # Include verification details when a live search was performed.
            if "search_verification" in eval_item:
                verification = eval_item["search_verification"]
                eval_detail["search_verification"] = {
                    "performed": True,
                    "total_notes": verification.get("total_notes", 0),
                    "evaluated_notes": verification.get("evaluated_notes", 0),
                    "satisfied_count": verification.get("satisfied_count", 0),
                    "average_confidence": verification.get("average_confidence", 0.0),
                    "search_result_file": verification.get("search_result_file"),
                    "has_error": "error" in verification
                }
                if "error" in verification:
                    eval_detail["search_verification"]["error"] = verification["error"]
                # Keep the per-note evaluation details as well.
                if "note_evaluations" in verification:
                    eval_detail["search_verification"]["note_evaluations"] = verification["note_evaluations"]
            else:
                eval_detail["search_verification"] = {
                    "performed": False,
                    "reason": "未达到搜索验证阈值(intent_match=False 或 relevance_score<0.7)"
                }
            candidate_detail["evaluations"].append(eval_detail)
        step_data["detailed_results"].append(candidate_detail)
    # Log the whole evaluation as one step.
    add_step(context, "评估候选query", "candidate_evaluation", step_data)
    return results
  703. # ============================================================================
  704. # 新的独立步骤函数(方案A)
  705. # ============================================================================
async def step_evaluate_query_suggestions(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
    """
    Step 1: evaluate the search suggestions of every candidate query.

    Inputs:
        candidates: list of candidate queries
        original_question: the user's original question
        context: run context (a step record is appended via add_step)

    Output:
        One result dict per candidate, each containing:
        - candidate: the candidate query
        - suggestions: suggestion keywords fetched for it
        - evaluations: per-suggestion intent match and relevance score
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 评估 {len(candidates)} 个候选query的推荐词")
    print(f"{'='*60}")
    xiaohongshu_api = XiaohongshuSearchRecommendations()

    async def evaluate_single_candidate(candidate: str):
        # Evaluate one candidate: fetch its suggestions, then score each one.
        print(f"\n评估候选:{candidate}")
        # 1. Fetch suggestion keywords for this candidate
        # NOTE(review): get_recommendations looks synchronous although it is
        # called inside a coroutine — confirm it does not block the loop.
        suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
        print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
        if not suggestions:
            # No suggestions: return an empty evaluation record.
            return {
                "candidate": candidate,
                "suggestions": [],
                "evaluations": []
            }

        # 2. Score each suggestion (intent match + relevance only)
        async def eval_single_sug(sug: str):
            eval_input = f"""
<原始问题>
{original_question}
</原始问题>
<待评估的推荐query>
{sug}
</待评估的推荐query>
请评估该推荐query:
1. intent_match: 意图是否匹配(true/false)
2. relevance_score: 相关性分数(0-1)
3. reason: 详细的评估理由
"""
            result = await Runner.run(evaluator, eval_input)
            evaluation: RelevanceEvaluation = result.final_output
            return {
                "query": sug,
                "intent_match": evaluation.intent_match,
                "relevance_score": evaluation.relevance_score,
                "reason": evaluation.reason
            }

        # All suggestions of one candidate are scored concurrently
        evaluations = await asyncio.gather(*[eval_single_sug(s) for s in suggestions])
        return {
            "candidate": candidate,
            "suggestions": suggestions,
            "evaluations": evaluations
        }

    # All candidates are evaluated concurrently
    results = await asyncio.gather(*[evaluate_single_candidate(c) for c in candidates])
    # Record this step (0.7 mirrors the downstream qualification threshold)
    add_step(context, "评估候选query的推荐词", "query_suggestion_evaluation", {
        "candidate_count": len(candidates),
        "candidates": candidates,
        "results": results,
        "total_evaluations": sum(len(r['evaluations']) for r in results),
        "qualified_count": sum(
            1 for r in results
            for e in r['evaluations']
            if e['intent_match'] and e['relevance_score'] >= 0.7
        )
    })
    return results
  777. def step_filter_qualified_queries(evaluation_results: list[dict], context: RunContext, min_relevance_score: float = 0.7) -> list[dict]:
  778. """
  779. 步骤1.5: 筛选合格的推荐词
  780. 输入:
  781. - evaluation_results: 步骤1的评估结果
  782. 输出:
  783. - 合格的query列表,每个包含:
  784. - query: 推荐词
  785. - from_candidate: 来源候选
  786. - intent_match: 意图匹配
  787. - relevance_score: 相关性分数
  788. - reason: 评估理由
  789. """
  790. step_num = len(context.steps) + 1
  791. print(f"\n{'='*60}")
  792. print(f"[步骤 {step_num}] 筛选合格的推荐词")
  793. print(f"{'='*60}")
  794. qualified_queries = []
  795. all_queries = [] # 保存所有查询,包括不合格的
  796. for result in evaluation_results:
  797. candidate = result["candidate"]
  798. for eval_item in result.get("evaluations", []):
  799. query_data = {
  800. "query": eval_item["query"],
  801. "from_candidate": candidate,
  802. "intent_match": eval_item["intent_match"],
  803. "relevance_score": eval_item["relevance_score"],
  804. "reason": eval_item["reason"]
  805. }
  806. # 判断是否合格
  807. is_qualified = (eval_item['intent_match'] is True
  808. and eval_item['relevance_score'] >= min_relevance_score)
  809. query_data["is_qualified"] = is_qualified
  810. all_queries.append(query_data)
  811. if is_qualified:
  812. qualified_queries.append(query_data)
  813. # 按相关性分数降序排列
  814. qualified_queries.sort(key=lambda x: x['relevance_score'], reverse=True)
  815. all_queries.sort(key=lambda x: x['relevance_score'], reverse=True)
  816. print(f"\n找到 {len(qualified_queries)} 个合格的推荐词 (共评估 {len(all_queries)} 个)")
  817. if qualified_queries:
  818. print(f"相关性分数范围: {qualified_queries[0]['relevance_score']:.2f} ~ {qualified_queries[-1]['relevance_score']:.2f}")
  819. print("\n合格的推荐词:")
  820. for idx, q in enumerate(qualified_queries[:5], 1):
  821. print(f" {idx}. {q['query']} (分数: {q['relevance_score']:.2f})")
  822. if len(qualified_queries) > 5:
  823. print(f" ... 还有 {len(qualified_queries) - 5} 个")
  824. # 记录步骤 - 保存所有查询数据
  825. add_step(context, "筛选合格的推荐词", "filter_qualified_queries", {
  826. "input_evaluation_count": len(all_queries),
  827. "min_relevance_score": min_relevance_score,
  828. "qualified_count": len(qualified_queries),
  829. "qualified_queries": qualified_queries,
  830. "all_queries": all_queries # 新增:保存所有查询数据
  831. })
  832. return qualified_queries
async def step_search_qualified_queries(qualified_queries: list[dict], context: RunContext) -> dict:
    """
    Step 2: run a search for every qualified suggestion query.

    Inputs:
        qualified_queries: queries filtered in step 1.5, each containing:
            - query: the suggestion keyword
            - from_candidate: the candidate it came from
            - intent_match, relevance_score, reason
        context: run context (provides log_dir; a step is appended via add_step)

    Output:
        dict with:
        - searches: per-query search results (raw notes capped at 10 each)
        - search_results_dir: directory where raw payloads were written
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 搜索 {len(qualified_queries)} 个合格的推荐词")
    print(f"{'='*60}")
    if not qualified_queries:
        # Nothing to search: still record an (empty) step for traceability.
        add_step(context, "搜索合格的推荐词", "search_qualified_queries", {
            "qualified_count": 0,
            "searches": []
        })
        return {"searches": [], "search_results_dir": None}
    # Directory where each query's raw search payload is persisted
    search_results_dir = os.path.join(context.log_dir, "search_results")
    os.makedirs(search_results_dir, exist_ok=True)
    xiaohongshu_search = XiaohongshuSearch()

    # Search one qualified query and persist its raw result to disk.
    # NOTE(review): xiaohongshu_search.search appears synchronous although it
    # is awaited via asyncio.gather — confirm it does not block the loop.
    async def search_single_query(query_info: dict, query_index: int):
        query = query_info['query']
        print(f"\n搜索 [{query_index+1}/{len(qualified_queries)}]: {query}")
        try:
            # Run the search
            search_result = xiaohongshu_search.search(keyword=query)
            # The "result" field may arrive JSON-encoded; normalize to a dict
            result_str = search_result.get("result", "{}")
            if isinstance(result_str, str):
                result_data = json.loads(result_str)
            else:
                result_data = result_str
            # Shape the payload that gets written to disk
            formatted_search_result = {
                "success": search_result.get("success"),
                "result": result_data,
                "tool_name": search_result.get("tool_name"),
                "call_type": search_result.get("call_type"),
                "query": query,
                "timestamp": datetime.now().isoformat()
            }
            # Persist under a filesystem-safe, length-capped directory name
            safe_query_name = "".join(c if c.isalnum() or c in (' ', '_', '-') else '_' for c in query)
            query_dir = os.path.join(search_results_dir, f"query_{query_index+1}_{safe_query_name[:50]}")
            os.makedirs(query_dir, exist_ok=True)
            search_result_file = os.path.join(query_dir, "search_result.json")
            with open(search_result_file, 'w', encoding='utf-8') as f:
                json.dump(formatted_search_result, f, ensure_ascii=False, indent=2)
            # Extract the note list (presumably result.data.data — verify
            # against the search API's response schema)
            notes = result_data.get("data", {}).get("data", [])
            print(f" → 搜索成功,获得 {len(notes)} 个帖子")
            # Build a compact per-note summary for steps.json
            notes_summary = []
            for note in notes[:10]:  # only the first 10 are summarized
                note_card = note.get("note_card", {})
                image_list = note_card.get("image_list", [])
                interact_info = note_card.get("interact_info", {})
                user_info = note_card.get("user", {})
                notes_summary.append({
                    "note_id": note.get("id", ""),
                    "title": note_card.get("display_title", ""),
                    "desc": note_card.get("desc", "")[:200],
                    "cover_image": image_list[0] if image_list else {},
                    "interact_info": {
                        "liked_count": interact_info.get("liked_count", 0),
                        "collected_count": interact_info.get("collected_count", 0),
                        "comment_count": interact_info.get("comment_count", 0),
                        "shared_count": interact_info.get("shared_count", 0)
                    },
                    "user": {
                        "nickname": user_info.get("nickname", ""),
                        "user_id": user_info.get("user_id", "")
                    },
                    "type": note_card.get("type", "normal")
                })
            return {
                "query": query,
                "from_candidate": query_info['from_candidate'],
                "intent_match": query_info['intent_match'],
                "relevance_score": query_info['relevance_score'],
                "reason": query_info['reason'],
                "search_result_file": search_result_file,
                "note_count": len(notes),
                "notes": notes[:10],  # first 10 only, used later for evaluation
                "notes_summary": notes_summary  # persisted into steps.json
            }
        except Exception as e:
            # Best-effort: a failed search is recorded, not fatal for the run.
            print(f" → 搜索失败: {e}")
            return {
                "query": query,
                "from_candidate": query_info['from_candidate'],
                "intent_match": query_info['intent_match'],
                "relevance_score": query_info['relevance_score'],
                "reason": query_info['reason'],
                "error": str(e),
                "note_count": 0,
                "notes": []
            }

    search_results = await asyncio.gather(*[search_single_query(q, i) for i, q in enumerate(qualified_queries)])
    # Record the step with a compact view of each search
    add_step(context, "搜索合格的推荐词", "search_qualified_queries", {
        "qualified_count": len(qualified_queries),
        "search_results": [
            {
                "query": sr['query'],
                "from_candidate": sr['from_candidate'],
                "note_count": sr['note_count'],
                "search_result_file": sr.get('search_result_file'),
                "has_error": 'error' in sr,
                "notes_summary": sr.get('notes_summary', [])  # note summaries
            }
            for sr in search_results
        ],
        "search_results_dir": search_results_dir
    })
    return {
        "searches": search_results,
        "search_results_dir": search_results_dir
    }
  960. async def step_evaluate_search_notes(search_data: dict, original_question: str, context: RunContext) -> dict:
  961. """
  962. 步骤3: 评估搜索到的帖子
  963. 输入:
  964. - search_data: 步骤2的搜索结果,包含:
  965. - searches: 搜索结果列表
  966. - search_results_dir: 结果目录
  967. 输出:
  968. - 帖子评估结果字典,包含:
  969. - note_evaluations: 每个query的帖子评估列表
  970. """
  971. step_num = len(context.steps) + 1
  972. print(f"\n{'='*60}")
  973. print(f"[步骤 {step_num}] 评估搜索到的帖子")
  974. print(f"{'='*60}")
  975. search_results = search_data['searches']
  976. if not search_results:
  977. add_step(context, "评估搜索到的帖子", "evaluate_search_notes", {
  978. "query_count": 0,
  979. "total_notes": 0,
  980. "evaluated_notes": 0,
  981. "note_evaluations": []
  982. })
  983. return {"note_evaluations": []}
  984. # 对每个query的帖子进行评估
  985. async def evaluate_query_notes(search_result: dict, query_index: int):
  986. query = search_result['query']
  987. notes = search_result.get('notes', [])
  988. if not notes or 'error' in search_result:
  989. return {
  990. "query": query,
  991. "from_candidate": search_result['from_candidate'],
  992. "note_count": 0,
  993. "evaluated_notes": [],
  994. "satisfied_count": 0,
  995. "average_confidence": 0.0
  996. }
  997. print(f"\n评估query [{query_index+1}]: {query} ({len(notes)} 个帖子)")
  998. # 评估每个帖子
  999. note_evaluations = []
  1000. for note_idx, note in enumerate(notes, 1):
  1001. note_card = note.get("note_card", {})
  1002. title = note_card.get("display_title", "")
  1003. desc = note_card.get("desc", "")
  1004. note_id = note.get("id", "")
  1005. # ⭐ 提取完整帖子信息用于可视化
  1006. image_list = note_card.get("image_list", [])
  1007. cover_image = image_list[0] if image_list else {}
  1008. interact_info = note_card.get("interact_info", {})
  1009. user_info = note_card.get("user", {})
  1010. # 调用评估Agent
  1011. eval_input = f"""
  1012. <原始问题>
  1013. {original_question}
  1014. </原始问题>
  1015. <帖子信息>
  1016. 标题: {title}
  1017. 描述: {desc}
  1018. </帖子信息>
  1019. 请评估这个帖子能否满足用户需求。
  1020. """
  1021. eval_result_run = await Runner.run(note_evaluator, eval_input)
  1022. note_eval: NoteEvaluation = eval_result_run.final_output
  1023. note_evaluations.append({
  1024. "note_index": note_idx,
  1025. "note_id": note_id,
  1026. "title": title,
  1027. "desc": desc[:200],
  1028. # ⭐ 新增:完整帖子信息
  1029. "image_list": image_list,
  1030. "cover_image": cover_image,
  1031. "interact_info": {
  1032. "liked_count": interact_info.get("liked_count", 0),
  1033. "collected_count": interact_info.get("collected_count", 0),
  1034. "comment_count": interact_info.get("comment_count", 0),
  1035. "shared_count": interact_info.get("shared_count", 0)
  1036. },
  1037. "user": {
  1038. "nickname": user_info.get("nickname", ""),
  1039. "user_id": user_info.get("user_id", "")
  1040. },
  1041. "type": note_card.get("type", "normal"),
  1042. "note_url": f"https://www.xiaohongshu.com/explore/{note_id}",
  1043. "evaluation": {
  1044. "title_relevance": note_eval.title_relevance,
  1045. "content_expectation": note_eval.content_expectation,
  1046. "need_satisfaction": note_eval.need_satisfaction,
  1047. "confidence_score": note_eval.confidence_score,
  1048. "reason": note_eval.reason
  1049. }
  1050. })
  1051. if note_idx % 3 == 0 or note_idx == len(notes):
  1052. print(f" 已评估 {note_idx}/{len(notes)} 个帖子")
  1053. # 统计
  1054. satisfied_count = sum(1 for ne in note_evaluations if ne["evaluation"]["need_satisfaction"])
  1055. avg_confidence = sum(ne["evaluation"]["confidence_score"] for ne in note_evaluations) / len(note_evaluations) if note_evaluations else 0
  1056. print(f" → 完成:{satisfied_count}/{len(note_evaluations)} 个帖子满足需求")
  1057. return {
  1058. "query": query,
  1059. "from_candidate": search_result['from_candidate'],
  1060. "note_count": len(notes),
  1061. "evaluated_notes": note_evaluations,
  1062. "satisfied_count": satisfied_count,
  1063. "average_confidence": round(avg_confidence, 2)
  1064. }
  1065. # 并发评估所有query的帖子
  1066. all_evaluations = await asyncio.gather(*[evaluate_query_notes(sr, i) for i, sr in enumerate(search_results, 1)])
  1067. # 记录步骤
  1068. total_notes = sum(e['note_count'] for e in all_evaluations)
  1069. total_satisfied = sum(e['satisfied_count'] for e in all_evaluations)
  1070. add_step(context, "评估搜索到的帖子", "evaluate_search_notes", {
  1071. "query_count": len(search_results),
  1072. "total_notes": total_notes,
  1073. "total_satisfied": total_satisfied,
  1074. "note_evaluations": all_evaluations
  1075. })
  1076. return {"note_evaluations": all_evaluations}
  1077. def step_collect_satisfied_notes(note_evaluation_data: dict) -> list[dict]:
  1078. """
  1079. 步骤4: 汇总所有满足需求的帖子
  1080. 输入:
  1081. - note_evaluation_data: 步骤3的帖子评估结果
  1082. 输出:
  1083. - 所有满足需求的帖子列表,按置信度降序排列
  1084. """
  1085. print(f"\n{'='*60}")
  1086. print(f"汇总满足需求的帖子")
  1087. print(f"{'='*60}")
  1088. all_satisfied_notes = []
  1089. for query_eval in note_evaluation_data['note_evaluations']:
  1090. for note in query_eval['evaluated_notes']:
  1091. if note['evaluation']['need_satisfaction']:
  1092. all_satisfied_notes.append({
  1093. "query": query_eval['query'],
  1094. "from_candidate": query_eval['from_candidate'],
  1095. "note_id": note['note_id'],
  1096. "title": note['title'],
  1097. "desc": note['desc'],
  1098. # ⭐ 保留完整帖子信息
  1099. "image_list": note.get('image_list', []),
  1100. "cover_image": note.get('cover_image', {}),
  1101. "interact_info": note.get('interact_info', {}),
  1102. "user": note.get('user', {}),
  1103. "type": note.get('type', 'normal'),
  1104. "note_url": note.get('note_url', ''),
  1105. # 评估结果
  1106. "title_relevance": note['evaluation']['title_relevance'],
  1107. "content_expectation": note['evaluation']['content_expectation'],
  1108. "confidence_score": note['evaluation']['confidence_score'],
  1109. "reason": note['evaluation']['reason']
  1110. })
  1111. # 按置信度降序排列
  1112. all_satisfied_notes.sort(key=lambda x: x['confidence_score'], reverse=True)
  1113. print(f"\n共收集到 {len(all_satisfied_notes)} 个满足需求的帖子")
  1114. if all_satisfied_notes:
  1115. print(f"置信度范围: {all_satisfied_notes[0]['confidence_score']:.2f} ~ {all_satisfied_notes[-1]['confidence_score']:.2f}")
  1116. return all_satisfied_notes
async def step_generate_answer(satisfied_notes: list[dict], original_question: str, context: RunContext) -> dict:
    """
    Step 5: generate an answer from the satisfying notes.

    Inputs:
        satisfied_notes: notes collected in step 4 (confidence-sorted)
        original_question: the user's original question
        context: run context (a step is appended via add_step)

    Output:
        dict with:
        - answer: the generated answer (Markdown)
        - cited_note_indices: 1-based indices of the cited notes
        - confidence: answer confidence
        - summary: short summary of the answer
        - cited_notes: details of the cited notes
    """
    step_num = len(context.steps) + 1
    print(f"\n{'='*60}")
    print(f"[步骤 {step_num}] 基于 {len(satisfied_notes)} 个帖子生成答案")
    print(f"{'='*60}")
    if not satisfied_notes:
        # No usable notes — return a fixed "no answer" payload.
        print("\n⚠️ 没有满足需求的帖子,无法生成答案")
        result = {
            "answer": "抱歉,未找到能够回答该问题的相关内容。",
            "cited_note_indices": [],
            "confidence": 0.0,
            "summary": "无可用信息",
            "cited_notes": []
        }
        add_step(context, "生成答案", "answer_generation", {
            "original_question": original_question,
            "input_notes_count": 0,
            "result": result
        })
        return result
    # Build the agent prompt: one numbered section per note
    notes_info = []
    for idx, note in enumerate(satisfied_notes, 1):
        notes_info.append(f"""
【帖子 {idx}】
标题: {note['title']}
描述: {note['desc']}
置信度: {note['confidence_score']:.2f}
""".strip())
    agent_input = f"""
<原始问题>
{original_question}
</原始问题>
<相关帖子>
{chr(10).join(notes_info)}
</相关帖子>
请基于以上帖子,为原始问题生成一个全面、准确的答案。
记得在答案中使用 [1], [2] 等标注引用的帖子序号。
""".strip()
    print(f"\n📝 调用答案生成Agent...")
    print(f" - 可用帖子: {len(satisfied_notes)} 个")
    print(f" - 平均置信度: {sum(n['confidence_score'] for n in satisfied_notes) / len(satisfied_notes):.2f}")
    # Ask the answer-generation agent
    result_run = await Runner.run(answer_generator, agent_input)
    answer_result: AnswerGeneration = result_run.final_output
    # Resolve the cited 1-based indices back to note details
    cited_notes = []
    for idx in answer_result.cited_note_indices:
        if 1 <= idx <= len(satisfied_notes):  # silently skip out-of-range citations
            note = satisfied_notes[idx - 1]
            cited_notes.append({
                "index": idx,
                "note_id": note['note_id'],
                "title": note['title'],
                "desc": note['desc'],
                "confidence_score": note['confidence_score'],
                # full note payload for visualization
                "cover_image": note.get('cover_image', {}),
                "interact_info": note.get('interact_info', {}),
                "user": note.get('user', {}),
                "note_url": note.get('note_url', ''),
                # evaluation details
                "title_relevance": note.get('title_relevance', 0),
                "content_expectation": note.get('content_expectation', 0),
                "reason": note.get('reason', '')
            })
    result = {
        "answer": answer_result.answer,
        "cited_note_indices": answer_result.cited_note_indices,
        "confidence": answer_result.confidence,
        "summary": answer_result.summary,
        "cited_notes": cited_notes
    }
    # Print a short report
    print(f"\n✅ 答案生成完成")
    print(f" - 引用帖子数: {len(answer_result.cited_note_indices)} 个")
    print(f" - 答案置信度: {answer_result.confidence:.2f}")
    print(f" - 答案摘要: {answer_result.summary}")
    # Record the step (prompt preview truncated to 500 chars)
    add_step(context, "生成答案", "answer_generation", {
        "original_question": original_question,
        "input_notes_count": len(satisfied_notes),
        "result": result,
        "agent_input_preview": agent_input[:500] + "..." if len(agent_input) > 500 else agent_input
    })
    return result
  1217. def find_qualified_queries(evaluation_results: list[dict], min_relevance_score: float = 0.7) -> list[dict]:
  1218. """
  1219. 查找所有合格的query(旧函数,保留兼容性)
  1220. 筛选标准:
  1221. 1. intent_match = True(必须满足)
  1222. 2. relevance_score >= min_relevance_score
  1223. 返回:按 relevance_score 降序排列
  1224. """
  1225. all_qualified = []
  1226. for result in evaluation_results:
  1227. for eval_item in result.get("evaluations", []):
  1228. if (eval_item['intent_match'] is True
  1229. and eval_item['relevance_score'] >= min_relevance_score):
  1230. all_qualified.append({
  1231. "from_candidate": result["candidate"],
  1232. **eval_item
  1233. })
  1234. # 按relevance_score降序排列
  1235. return sorted(all_qualified, key=lambda x: x['relevance_score'], reverse=True)
  1236. # ============================================================================
  1237. # 主流程
  1238. # ============================================================================
async def progressive_exploration(context: RunContext, max_levels: int = 4) -> dict:
    """
    Progressive exploration pipeline built from independent steps.

    Flow:
        1. Extract keywords and explore level by level (reuses the old flow)
        2. Step 1:   evaluate suggestions of candidate queries
        3. Step 1.5: filter qualified suggestions
        4. Step 2:   search the qualified suggestions
        5. Step 3:   evaluate the notes found by the searches
        6. Step 4:   collect the notes that satisfy the need
        7. Step 5:   generate the final answer

    Args:
        context: run context
        max_levels: maximum number of exploration levels (default 4)

    Returns:
        {
            "success": True/False,
            "final_answer": {...} or None,
            "satisfied_notes": [...],
            "message": "..."
        }
    """
    # ===== Phase 1: progressive exploration (old flow) to find candidates =====
    # 1.1 Extract keywords from the question
    keyword_result = await extract_keywords(context.q, context)
    context.keywords = keyword_result.keywords
    # 1.2 Explore level by level
    current_level = 1
    candidates_to_evaluate = []
    # Level 1: single keywords (capped at the first 7)
    level_1_queries = context.keywords[:7]
    level_1_data = await explore_level(level_1_queries, current_level, context)
    analysis_1 = await analyze_level(level_1_data, context.exploration_levels, context.q, context)
    if analysis_1.should_evaluate_now:
        candidates_to_evaluate.extend(analysis_1.candidates_to_evaluate)
    # Level 2 onwards: iterate on the previous level's suggested combinations
    for level_num in range(2, max_levels + 1):
        # NOTE(review): the stored analysis appears to be a plain dict that is
        # rehydrated into a LevelAnalysis here — confirm against analyze_level.
        prev_analysis: LevelAnalysis = context.level_analyses[-1]["analysis"]
        prev_analysis = LevelAnalysis(**prev_analysis)
        if not prev_analysis.next_combinations:
            print(f"\nLevel {level_num-1} 分析后无需继续探索")
            break
        level_data = await explore_level(prev_analysis.next_combinations, level_num, context)
        analysis = await analyze_level(level_data, context.exploration_levels, context.q, context)
        if analysis.should_evaluate_now:
            candidates_to_evaluate.extend(analysis.candidates_to_evaluate)
    if not candidates_to_evaluate:
        # Exploration produced no candidates — abort with a failure payload.
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "渐进式探索未找到候选query"
        }
    print(f"\n{'='*60}")
    print(f"渐进式探索完成,找到 {len(candidates_to_evaluate)} 个候选query")
    print(f"{'='*60}")
    # ===== Phase 2: the new independent-step flow =====
    # Step 1: evaluate suggestions of the candidate queries
    evaluation_results = await step_evaluate_query_suggestions(
        candidates_to_evaluate,
        context.q,
        context
    )
    # Step 1.5: keep only qualified suggestions
    qualified_queries = step_filter_qualified_queries(
        evaluation_results,
        context,
        min_relevance_score=0.7
    )
    if not qualified_queries:
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "没有合格的推荐词"
        }
    # Step 2: search the qualified suggestions
    search_results = await step_search_qualified_queries(
        qualified_queries,
        context
    )
    if not search_results.get('searches'):
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "搜索失败"
        }
    # Step 3: evaluate the notes found by the searches
    note_evaluation_data = await step_evaluate_search_notes(
        search_results,
        context.q,
        context
    )
    # Step 4: collect the satisfying notes
    satisfied_notes = step_collect_satisfied_notes(note_evaluation_data)
    if not satisfied_notes:
        return {
            "success": False,
            "final_answer": None,
            "satisfied_notes": [],
            "message": "未找到满足需求的帖子"
        }
    # Step 5: generate the final answer
    final_answer = await step_generate_answer(
        satisfied_notes,
        context.q,
        context
    )
    # ===== Final result =====
    return {
        "success": True,
        "final_answer": final_answer,
        "satisfied_notes": satisfied_notes,
        "message": f"成功找到 {len(satisfied_notes)} 个满足需求的帖子,并生成答案"
    }
  1354. # ============================================================================
  1355. # 输出格式化
  1356. # ============================================================================
  1357. def format_output(optimization_result: dict, context: RunContext) -> str:
  1358. """
  1359. 格式化输出结果 - 用于独立步骤流程
  1360. 包含:
  1361. - 生成的答案
  1362. - 引用的帖子详情
  1363. - 满足需求的帖子统计
  1364. """
  1365. final_answer = optimization_result.get("final_answer")
  1366. satisfied_notes = optimization_result.get("satisfied_notes", [])
  1367. output = f"原始问题:{context.q}\n"
  1368. output += f"提取的关键词:{', '.join(context.keywords or [])}\n"
  1369. output += f"探索层数:{len(context.exploration_levels)}\n"
  1370. output += f"找到满足需求的帖子:{len(satisfied_notes)} 个\n"
  1371. output += "\n" + "="*60 + "\n"
  1372. if final_answer:
  1373. output += "【生成的答案】\n\n"
  1374. output += final_answer.get("answer", "")
  1375. output += "\n\n" + "="*60 + "\n"
  1376. output += f"答案置信度:{final_answer.get('confidence', 0):.2f}\n"
  1377. output += f"答案摘要:{final_answer.get('summary', '')}\n"
  1378. output += f"引用帖子数:{len(final_answer.get('cited_note_indices', []))} 个\n"
  1379. output += "\n" + "="*60 + "\n"
  1380. output += "【引用的帖子详情】\n\n"
  1381. for cited_note in final_answer.get("cited_notes", []):
  1382. output += f"[{cited_note['index']}] {cited_note['title']}\n"
  1383. output += f" 置信度: {cited_note['confidence_score']:.2f}\n"
  1384. output += f" 描述: {cited_note['desc'][:100]}...\n"
  1385. output += f" note_id: {cited_note['note_id']}\n\n"
  1386. else:
  1387. output += "未能生成答案\n"
  1388. return output
  1389. # ============================================================================
  1390. # 主函数
  1391. # ============================================================================
async def main(input_dir: str, max_levels: int = 4):
    """
    Entry point — runs the independent-step pipeline (plan A).

    Reads `context.md` and `q.md` from input_dir, runs the progressive
    exploration pipeline, prints the formatted result, and writes
    `run_context.json` plus `steps.json` under
    `<input_dir>/output/<version>/<timestamp>/`.
    """
    current_time, log_url = set_trace()
    # Fixed file names inside the input directory
    input_context_file = os.path.join(input_dir, 'context.md')
    input_q_file = os.path.join(input_dir, 'q.md')
    q_context = read_file_as_string(input_context_file)
    q = read_file_as_string(input_q_file)
    q_with_context = f"""
<需求上下文>
{q_context}
</需求上下文>
<当前问题>
{q}
</当前问题>
""".strip()
    # Use this script's file name as the version tag
    version = os.path.basename(__file__)
    version_name = os.path.splitext(version)[0]
    # Directory that receives all logs of this run
    log_dir = os.path.join(input_dir, "output", version_name, current_time)
    run_context = RunContext(
        version=version,
        input_files={
            "input_dir": input_dir,
            "context_file": input_context_file,
            "q_file": input_q_file,
        },
        q_with_context=q_with_context,
        q_context=q_context,
        q=q,
        log_dir=log_dir,
        log_url=log_url,
    )
    # Run the progressive exploration pipeline
    optimization_result = await progressive_exploration(run_context, max_levels=max_levels)
    # Render the final report
    final_output = format_output(optimization_result, run_context)
    print(f"\n{'='*60}")
    print("最终结果")
    print(f"{'='*60}")
    # Keep results on the context
    # NOTE(review): assumes RunContext accepts assignment of these fields —
    # confirm the model declares them (it exposes model_dump, i.e. pydantic v2)
    print(final_output)
    run_context.optimization_result = optimization_result
    run_context.final_output = final_output
    # Record the final-output step (new format)
    final_answer = optimization_result.get("final_answer")
    satisfied_notes = optimization_result.get("satisfied_notes", [])
    add_step(run_context, "生成最终结果", "final_result", {
        "success": optimization_result["success"],
        "message": optimization_result["message"],
        "satisfied_notes_count": len(satisfied_notes),
        "final_answer": final_answer,
        "satisfied_notes_summary": [
            {
                "note_id": note["note_id"],
                "title": note["title"],
                "confidence_score": note["confidence_score"]
            }
            for note in satisfied_notes[:10]  # summary of the first 10 only
        ] if satisfied_notes else [],
        "final_output": final_output
    })
    # Persist RunContext (steps removed — they are saved separately below)
    os.makedirs(run_context.log_dir, exist_ok=True)
    context_file_path = os.path.join(run_context.log_dir, "run_context.json")
    context_dict = run_context.model_dump()
    context_dict.pop('steps', None)  # avoid duplicating the step log
    with open(context_file_path, "w", encoding="utf-8") as f:
        json.dump(context_dict, f, ensure_ascii=False, indent=2)
    print(f"\nRunContext saved to: {context_file_path}")
    # Persist the step-by-step log
    steps_file_path = os.path.join(run_context.log_dir, "steps.json")
    with open(steps_file_path, "w", encoding="utf-8") as f:
        json.dump(run_context.steps, f, ensure_ascii=False, indent=2)
    print(f"Steps log saved to: {steps_file_path}")
  1470. if __name__ == "__main__":
  1471. parser = argparse.ArgumentParser(description="搜索query优化工具 - v6.1.2.3 独立步骤+答案生成版")
  1472. parser.add_argument(
  1473. "--input-dir",
  1474. type=str,
  1475. default="input/简单扣图",
  1476. help="输入目录路径,默认: input/简单扣图"
  1477. )
  1478. parser.add_argument(
  1479. "--max-levels",
  1480. type=int,
  1481. default=4,
  1482. help="最大探索层数,默认: 4"
  1483. )
  1484. args = parser.parse_args()
  1485. asyncio.run(main(args.input_dir, max_levels=args.max_levels))