# sug_v6_1_2_1.py
# Progressive search-query optimization pipeline (v6.1: intent match + relevance scoring).
import argparse
import asyncio
import json
import os
from datetime import datetime
from typing import Literal

from pydantic import BaseModel, Field

from agents import Agent, Runner
from lib.client import get_model
from lib.my_trace import set_trace
from lib.utils import read_file_as_string
from script.search_recommendations.xiaohongshu_search_recommendations import XiaohongshuSearchRecommendations

MODEL_NAME = "google/gemini-2.5-flash"
  14. class RunContext(BaseModel):
  15. version: str = Field(..., description="当前运行的脚本版本(文件名)")
  16. input_files: dict[str, str] = Field(..., description="输入文件路径映射")
  17. q_with_context: str
  18. q_context: str
  19. q: str
  20. log_url: str
  21. log_dir: str
  22. # 步骤化日志
  23. steps: list[dict] = Field(default_factory=list, description="执行步骤的详细记录")
  24. # 探索阶段记录(保留用于向后兼容)
  25. keywords: list[str] | None = Field(default=None, description="提取的关键词")
  26. exploration_levels: list[dict] = Field(default_factory=list, description="每一层的探索结果")
  27. level_analyses: list[dict] = Field(default_factory=list, description="每一层的主Agent分析")
  28. # 最终结果
  29. final_candidates: list[str] | None = Field(default=None, description="最终选出的候选query")
  30. evaluation_results: list[dict] | None = Field(default=None, description="候选query的评估结果")
  31. optimization_result: dict | None = Field(default=None, description="最终优化结果对象")
  32. final_output: str | None = Field(default=None, description="最终输出结果(格式化文本)")
  33. # ============================================================================
  34. # Agent 1: 关键词提取专家
  35. # ============================================================================
  36. keyword_extraction_instructions = """
  37. 你是关键词提取专家。给定一个搜索问题(含上下文),提取出**最细粒度的关键概念**。
  38. ## 提取原则
  39. 1. **细粒度优先**:拆分成最小的有意义单元
  40. - 不要保留完整的长句
  41. - 拆分成独立的、有搜索意义的词或短语
  42. 2. **保留核心维度**:
  43. - 地域/对象
  44. - 时间
  45. - 行为/意图:获取、教程、推荐、如何等
  46. - 主题/领域
  47. - 质量/属性
  48. 3. **去掉无意义的虚词**:的、吗、呢等
  49. 4. **保留领域专有词**:不要过度拆分专业术语
  50. - 如果是常见的组合词,保持完整
  51. ## 输出要求
  52. 输出关键词列表,按重要性排序(最核心的在前)。
  53. """.strip()
  54. class KeywordList(BaseModel):
  55. """关键词列表"""
  56. keywords: list[str] = Field(..., description="提取的关键词,按重要性排序")
  57. reasoning: str = Field(..., description="提取理由")
  58. keyword_extractor = Agent[None](
  59. name="关键词提取专家",
  60. instructions=keyword_extraction_instructions,
  61. model=get_model(MODEL_NAME),
  62. output_type=KeywordList,
  63. )
  64. # ============================================================================
  65. # Agent 2: 层级探索分析专家
  66. # ============================================================================
  67. level_analysis_instructions = """
  68. 你是搜索空间探索分析专家。基于当前层级的探索结果,决定下一步行动。
  69. ## 你的任务
  70. 分析当前已探索的词汇空间,判断:
  71. 1. **发现了什么有价值的信号?**
  72. 2. **是否已经可以评估候选了?**
  73. 3. **如果还不够,下一层应该探索什么组合?**
  74. ## 分析维度
  75. ### 1. 信号识别(最重要)
  76. 看推荐词里**出现了什么主题**:
  77. **关键问题:**
  78. - 哪些推荐词**最接近原始需求**?
  79. - 哪些推荐词**揭示了有价值的方向**(即使不完全匹配)?
  80. - 哪些推荐词可以作为**下一层探索的桥梁**?
  81. - 系统对哪些概念理解得好?哪些理解偏了?
  82. ### 2. 组合策略
  83. 基于发现的信号,设计下一层探索:
  84. **组合类型:**
  85. a) **关键词直接组合**
  86. - 两个关键词组合成新query
  87. b) **利用推荐词作为桥梁**(重要!)
  88. - 发现某个推荐词很有价值 → 直接探索这个推荐词
  89. - 或在推荐词基础上加其他关键词
  90. c) **跨层级组合**
  91. - 结合多层发现的有价值推荐词
  92. - 组合成更复杂的query
  93. ### 3. 停止条件
  94. **何时可以评估候选?**
  95. 满足以下之一:
  96. - 推荐词中出现了**明确包含原始需求多个核心要素的query**
  97. - 已经探索到**足够复杂的组合**(3-4个关键词),且推荐词相关
  98. - 探索了**3-4层**,信息已经足够丰富
  99. **何时继续探索?**
  100. - 当前推荐词太泛,没有接近原始需求
  101. - 发现了有价值的信号,但需要进一步组合验证
  102. - 层数还少(< 3层)
  103. ## 输出要求
  104. ### 1. key_findings
  105. 总结当前层发现的关键信息,包括:
  106. - 哪些推荐词最有价值?
  107. - 系统对哪些概念理解得好/不好?
  108. - 发现了什么意外的方向?
  109. ### 2. promising_signals
  110. 列出最有价值的推荐词(来自任何已探索的query),每个说明为什么有价值
  111. ### 3. should_evaluate_now
  112. 是否已经可以开始评估候选了?true/false
  113. ### 4. candidates_to_evaluate
  114. 如果should_evaluate_now=true,列出应该评估的候选query
  115. - 可以是推荐词
  116. - 可以是自己构造的组合
  117. ### 5. next_combinations
  118. 如果should_evaluate_now=false,列出下一层应该探索的query组合
  119. ### 6. reasoning
  120. 详细的推理过程
  121. ## 重要原则
  122. 1. **不要过早评估**:至少探索2层,除非第一层就发现了完美匹配
  123. 2. **充分利用推荐词**:推荐词是系统给的提示,要善用
  124. 3. **保持探索方向的多样性**:不要只盯着一个方向
  125. 4. **识别死胡同**:如果某个方向的推荐词一直不相关,果断放弃
  126. """.strip()
  127. class PromisingSignal(BaseModel):
  128. """有价值的推荐词信号"""
  129. query: str = Field(..., description="推荐词")
  130. from_level: int = Field(..., description="来自哪一层")
  131. reason: str = Field(..., description="为什么有价值")
  132. class LevelAnalysis(BaseModel):
  133. """层级分析结果"""
  134. key_findings: str = Field(..., description="当前层的关键发现")
  135. promising_signals: list[PromisingSignal] = Field(..., description="有价值的推荐词信号")
  136. should_evaluate_now: bool = Field(..., description="是否应该开始评估候选")
  137. candidates_to_evaluate: list[str] = Field(default_factory=list, description="如果should_evaluate_now=true,要评估的候选query列表")
  138. next_combinations: list[str] = Field(default_factory=list, description="如果should_evaluate_now=false,下一层要探索的query组合")
  139. reasoning: str = Field(..., description="详细的推理过程")
  140. level_analyzer = Agent[None](
  141. name="层级探索分析专家",
  142. instructions=level_analysis_instructions,
  143. model=get_model(MODEL_NAME),
  144. output_type=LevelAnalysis,
  145. )
  146. # ============================================================================
  147. # Agent 3: 评估专家(简化版:意图匹配 + 相关性评分)
  148. # ============================================================================
  149. eval_instructions = """
  150. 你是搜索query评估专家。给定原始问题和推荐query,评估两个维度。
  151. ## 评估目标
  152. 用这个推荐query搜索,能否找到满足原始需求的内容?
  153. ## 两层评分
  154. ### 1. intent_match(意图匹配)= true/false
  155. 推荐query的**使用意图**是否与原问题一致?
  156. **核心问题:用户搜索这个推荐词,想做什么?**
  157. **判断标准:**
  158. - 原问题意图:找方法?找教程?找资源/素材?找工具?看作品?
  159. - 推荐词意图:如果用户搜索这个词,他的目的是什么?
  160. **示例:**
  161. - 原问题意图="找素材"
  162. - ✅ true: "素材下载"、"素材网站"、"免费素材"(都是获取素材)
  163. - ❌ false: "素材制作教程"、"如何制作素材"(意图变成学习了)
  164. - 原问题意图="学教程"
  165. - ✅ true: "教程视频"、"教学步骤"、"入门指南"
  166. - ❌ false: "成品展示"、"作品欣赏"(意图变成看作品了)
  167. **评分:**
  168. - true = 意图一致,搜索推荐词能达到原问题的目的
  169. - false = 意图改变,搜索推荐词无法达到原问题的目的
  170. ### 2. relevance_score(相关性)= 0-1 连续分数
  171. 推荐query在**主题、要素、属性**上与原问题的相关程度?
  172. **评估维度:**
  173. - 主题相关:核心主题是否匹配?(如:摄影、旅游、美食)
  174. - 要素覆盖:关键要素保留了多少?(如:地域、时间、对象、工具)
  175. - 属性匹配:质量、风格、特色等属性是否保留?
  176. **评分参考:**
  177. - 0.9-1.0 = 几乎完美匹配,所有核心要素都保留
  178. - 0.7-0.8 = 高度相关,核心要素保留,少数次要要素缺失
  179. - 0.5-0.6 = 中度相关,主题匹配但多个要素缺失
  180. - 0.3-0.4 = 低度相关,只有部分主题相关
  181. - 0-0.2 = 基本不相关
  182. ## 评估策略
  183. 1. **先判断 intent_match**:意图不匹配直接 false,无论相关性多高
  184. 2. **再评估 relevance_score**:在意图匹配的前提下,计算相关性
  185. ## 输出要求
  186. - intent_match: true/false
  187. - relevance_score: 0-1 的浮点数
  188. - reason: 详细的评估理由,需要说明:
  189. - 原问题的意图是什么
  190. - 推荐词的意图是什么
  191. - 为什么判断意图匹配/不匹配
  192. - 相关性分数的依据(哪些要素保留/缺失)
  193. """.strip()
  194. class RelevanceEvaluation(BaseModel):
  195. """评估反馈模型 - 意图匹配 + 相关性"""
  196. intent_match: bool = Field(..., description="意图是否匹配")
  197. relevance_score: float = Field(..., description="相关性分数 0-1,分数越高越相关")
  198. reason: str = Field(..., description="评估理由,需说明意图判断和相关性依据")
  199. evaluator = Agent[None](
  200. name="评估专家",
  201. instructions=eval_instructions,
  202. model=get_model(MODEL_NAME),
  203. output_type=RelevanceEvaluation,
  204. )
  205. # ============================================================================
  206. # 日志辅助函数
  207. # ============================================================================
  208. def add_step(context: RunContext, step_name: str, step_type: str, data: dict):
  209. """添加步骤记录"""
  210. step = {
  211. "step_number": len(context.steps) + 1,
  212. "step_name": step_name,
  213. "step_type": step_type,
  214. "timestamp": datetime.now().isoformat(),
  215. "data": data
  216. }
  217. context.steps.append(step)
  218. return step
  219. # ============================================================================
  220. # 核心函数
  221. # ============================================================================
  222. async def extract_keywords(q: str, context: RunContext) -> KeywordList:
  223. """提取关键词"""
  224. print("\n[步骤 1] 正在提取关键词...")
  225. result = await Runner.run(keyword_extractor, q)
  226. keyword_list: KeywordList = result.final_output
  227. print(f"提取的关键词:{keyword_list.keywords}")
  228. print(f"提取理由:{keyword_list.reasoning}")
  229. # 记录步骤
  230. add_step(context, "提取关键词", "keyword_extraction", {
  231. "input_question": q,
  232. "keywords": keyword_list.keywords,
  233. "reasoning": keyword_list.reasoning
  234. })
  235. return keyword_list
  236. async def explore_level(queries: list[str], level_num: int, context: RunContext) -> dict:
  237. """探索一个层级(并发获取所有query的推荐词)"""
  238. step_num = len(context.steps) + 1
  239. print(f"\n{'='*60}")
  240. print(f"[步骤 {step_num}] Level {level_num} 探索:{len(queries)} 个query")
  241. print(f"{'='*60}")
  242. xiaohongshu_api = XiaohongshuSearchRecommendations()
  243. # 并发获取所有推荐词
  244. async def get_single_sug(query: str):
  245. print(f" 探索: {query}")
  246. suggestions = xiaohongshu_api.get_recommendations(keyword=query)
  247. print(f" → {len(suggestions) if suggestions else 0} 个推荐词")
  248. return {
  249. "query": query,
  250. "suggestions": suggestions or []
  251. }
  252. results = await asyncio.gather(*[get_single_sug(q) for q in queries])
  253. level_data = {
  254. "level": level_num,
  255. "timestamp": datetime.now().isoformat(),
  256. "queries": results
  257. }
  258. context.exploration_levels.append(level_data)
  259. # 记录步骤
  260. add_step(context, f"Level {level_num} 探索", "level_exploration", {
  261. "level": level_num,
  262. "input_queries": queries,
  263. "query_count": len(queries),
  264. "results": results,
  265. "total_suggestions": sum(len(r['suggestions']) for r in results)
  266. })
  267. return level_data
  268. async def analyze_level(level_data: dict, all_levels: list[dict], original_question: str, context: RunContext) -> LevelAnalysis:
  269. """分析当前层级,决定下一步"""
  270. step_num = len(context.steps) + 1
  271. print(f"\n[步骤 {step_num}] 正在分析 Level {level_data['level']}...")
  272. # 构造输入
  273. analysis_input = f"""
  274. <原始问题>
  275. {original_question}
  276. </原始问题>
  277. <已探索的所有层级>
  278. {json.dumps(all_levels, ensure_ascii=False, indent=2)}
  279. </已探索的所有层级>
  280. <当前层级>
  281. Level {level_data['level']}
  282. {json.dumps(level_data['queries'], ensure_ascii=False, indent=2)}
  283. </当前层级>
  284. 请分析当前探索状态,决定下一步行动。
  285. """
  286. result = await Runner.run(level_analyzer, analysis_input)
  287. analysis: LevelAnalysis = result.final_output
  288. print(f"\n分析结果:")
  289. print(f" 关键发现:{analysis.key_findings}")
  290. print(f" 有价值的信号:{len(analysis.promising_signals)} 个")
  291. print(f" 是否评估:{analysis.should_evaluate_now}")
  292. if analysis.should_evaluate_now:
  293. print(f" 候选query:{analysis.candidates_to_evaluate}")
  294. else:
  295. print(f" 下一层探索:{analysis.next_combinations}")
  296. # 保存分析结果
  297. context.level_analyses.append({
  298. "level": level_data['level'],
  299. "timestamp": datetime.now().isoformat(),
  300. "analysis": analysis.model_dump()
  301. })
  302. # 记录步骤
  303. add_step(context, f"Level {level_data['level']} 分析", "level_analysis", {
  304. "level": level_data['level'],
  305. "key_findings": analysis.key_findings,
  306. "promising_signals_count": len(analysis.promising_signals),
  307. "promising_signals": [s.model_dump() for s in analysis.promising_signals],
  308. "should_evaluate_now": analysis.should_evaluate_now,
  309. "candidates_to_evaluate": analysis.candidates_to_evaluate if analysis.should_evaluate_now else [],
  310. "next_combinations": analysis.next_combinations if not analysis.should_evaluate_now else [],
  311. "reasoning": analysis.reasoning
  312. })
  313. return analysis
  314. async def evaluate_candidates(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
  315. """评估候选query"""
  316. step_num = len(context.steps) + 1
  317. print(f"\n{'='*60}")
  318. print(f"[步骤 {step_num}] 评估 {len(candidates)} 个候选query")
  319. print(f"{'='*60}")
  320. xiaohongshu_api = XiaohongshuSearchRecommendations()
  321. async def evaluate_single_candidate(candidate: str):
  322. print(f"\n评估候选:{candidate}")
  323. # 1. 获取推荐词
  324. suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
  325. print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
  326. if not suggestions:
  327. return {
  328. "candidate": candidate,
  329. "suggestions": [],
  330. "evaluations": []
  331. }
  332. # 2. 评估每个推荐词
  333. async def eval_single_sug(sug: str):
  334. eval_input = f"""
  335. <原始问题>
  336. {original_question}
  337. </原始问题>
  338. <待评估的推荐query>
  339. {sug}
  340. </待评估的推荐query>
  341. 请评估该推荐query:
  342. 1. intent_match: 意图是否匹配(true/false)
  343. 2. relevance_score: 相关性分数(0-1)
  344. 3. reason: 详细的评估理由
  345. """
  346. result = await Runner.run(evaluator, eval_input)
  347. evaluation: RelevanceEvaluation = result.final_output
  348. return {
  349. "query": sug,
  350. "intent_match": evaluation.intent_match,
  351. "relevance_score": evaluation.relevance_score,
  352. "reason": evaluation.reason,
  353. }
  354. evaluations = await asyncio.gather(*[eval_single_sug(s) for s in suggestions])
  355. return {
  356. "candidate": candidate,
  357. "suggestions": suggestions,
  358. "evaluations": evaluations
  359. }
  360. results = await asyncio.gather(*[evaluate_single_candidate(c) for c in candidates])
  361. context.evaluation_results = results
  362. # 记录步骤
  363. add_step(context, "评估候选query", "candidate_evaluation", {
  364. "candidate_count": len(candidates),
  365. "candidates": candidates,
  366. "results": results,
  367. "total_evaluations": sum(len(r['evaluations']) for r in results)
  368. })
  369. return results
  370. def find_qualified_queries(evaluation_results: list[dict], min_relevance_score: float = 0.7) -> list[dict]:
  371. """
  372. 查找所有合格的query
  373. 筛选标准:
  374. 1. intent_match = True(必须满足)
  375. 2. relevance_score >= min_relevance_score
  376. 返回:按 relevance_score 降序排列
  377. """
  378. all_qualified = []
  379. for result in evaluation_results:
  380. for eval_item in result.get("evaluations", []):
  381. if (eval_item['intent_match'] is True
  382. and eval_item['relevance_score'] >= min_relevance_score):
  383. all_qualified.append({
  384. "from_candidate": result["candidate"],
  385. **eval_item
  386. })
  387. # 按relevance_score降序排列
  388. return sorted(all_qualified, key=lambda x: x['relevance_score'], reverse=True)
  389. # ============================================================================
  390. # 主流程
  391. # ============================================================================
  392. async def progressive_exploration(context: RunContext, max_levels: int = 4) -> dict:
  393. """
  394. 渐进式广度探索流程
  395. Args:
  396. context: 运行上下文
  397. max_levels: 最大探索层数,默认4
  398. 返回格式:
  399. {
  400. "success": True/False,
  401. "results": [...],
  402. "message": "..."
  403. }
  404. """
  405. # 阶段1:提取关键词(从原始问题提取)
  406. keyword_result = await extract_keywords(context.q, context)
  407. context.keywords = keyword_result.keywords
  408. # 阶段2:渐进式探索
  409. current_level = 1
  410. # Level 1:单个关键词
  411. level_1_queries = context.keywords[:7] # 限制最多7个关键词
  412. level_1_data = await explore_level(level_1_queries, current_level, context)
  413. # 分析Level 1
  414. analysis_1 = await analyze_level(level_1_data, context.exploration_levels, context.q, context)
  415. if analysis_1.should_evaluate_now:
  416. # 直接评估
  417. eval_results = await evaluate_candidates(analysis_1.candidates_to_evaluate, context.q, context)
  418. qualified = find_qualified_queries(eval_results, min_relevance_score=0.7)
  419. if qualified:
  420. return {
  421. "success": True,
  422. "results": qualified,
  423. "message": f"Level 1 即找到 {len(qualified)} 个合格query"
  424. }
  425. # Level 2 及以后:迭代探索
  426. for level_num in range(2, max_levels + 1):
  427. # 获取上一层的分析结果
  428. prev_analysis: LevelAnalysis = context.level_analyses[-1]["analysis"]
  429. prev_analysis = LevelAnalysis(**prev_analysis) # 转回对象
  430. if not prev_analysis.next_combinations:
  431. print(f"\nLevel {level_num-1} 分析后无需继续探索")
  432. break
  433. # 探索当前层
  434. level_data = await explore_level(prev_analysis.next_combinations, level_num, context)
  435. # 分析当前层
  436. analysis = await analyze_level(level_data, context.exploration_levels, context.q, context)
  437. if analysis.should_evaluate_now:
  438. # 评估候选
  439. eval_results = await evaluate_candidates(analysis.candidates_to_evaluate, context.q, context)
  440. qualified = find_qualified_queries(eval_results, min_relevance_score=0.7)
  441. if qualified:
  442. return {
  443. "success": True,
  444. "results": qualified,
  445. "message": f"Level {level_num} 找到 {len(qualified)} 个合格query"
  446. }
  447. # 所有层探索完,降低标准
  448. print(f"\n{'='*60}")
  449. print(f"探索完 {max_levels} 层,降低标准(relevance_score >= 0.5)")
  450. print(f"{'='*60}")
  451. if context.evaluation_results:
  452. acceptable = find_qualified_queries(context.evaluation_results, min_relevance_score=0.5)
  453. if acceptable:
  454. return {
  455. "success": True,
  456. "results": acceptable,
  457. "message": f"找到 {len(acceptable)} 个可接受query(soft_score >= 0.5)"
  458. }
  459. # 完全失败
  460. return {
  461. "success": False,
  462. "results": [],
  463. "message": "探索完所有层级,未找到合格的推荐词"
  464. }
  465. # ============================================================================
  466. # 输出格式化
  467. # ============================================================================
  468. def format_output(optimization_result: dict, context: RunContext) -> str:
  469. """格式化输出结果"""
  470. results = optimization_result.get("results", [])
  471. output = f"原始问题:{context.q}\n"
  472. output += f"提取的关键词:{', '.join(context.keywords or [])}\n"
  473. output += f"探索层数:{len(context.exploration_levels)}\n"
  474. output += f"状态:{optimization_result['message']}\n\n"
  475. if optimization_result["success"] and results:
  476. output += "合格的推荐query(按relevance_score降序):\n"
  477. for i, result in enumerate(results, 1):
  478. output += f"\n{i}. {result['query']}\n"
  479. output += f" - 来自候选:{result['from_candidate']}\n"
  480. output += f" - 意图匹配:{result['intent_match']} (True=意图一致)\n"
  481. output += f" - 相关性分数:{result['relevance_score']:.2f} (0-1,越高越相关)\n"
  482. output += f" - 评估理由:{result['reason']}\n"
  483. else:
  484. output += "结果:未找到合格推荐query\n"
  485. if context.level_analyses:
  486. last_analysis = context.level_analyses[-1]["analysis"]
  487. output += f"\n最后一层分析:\n{last_analysis.get('key_findings', 'N/A')}\n"
  488. return output.strip()
  489. # ============================================================================
  490. # 主函数
  491. # ============================================================================
  492. async def main(input_dir: str, max_levels: int = 4):
  493. current_time, log_url = set_trace()
  494. # 从目录中读取固定文件名
  495. input_context_file = os.path.join(input_dir, 'context.md')
  496. input_q_file = os.path.join(input_dir, 'q.md')
  497. q_context = read_file_as_string(input_context_file)
  498. q = read_file_as_string(input_q_file)
  499. q_with_context = f"""
  500. <需求上下文>
  501. {q_context}
  502. </需求上下文>
  503. <当前问题>
  504. {q}
  505. </当前问题>
  506. """.strip()
  507. # 获取当前文件名作为版本
  508. version = os.path.basename(__file__)
  509. version_name = os.path.splitext(version)[0]
  510. # 日志保存目录
  511. log_dir = os.path.join(input_dir, "output", version_name, current_time)
  512. run_context = RunContext(
  513. version=version,
  514. input_files={
  515. "input_dir": input_dir,
  516. "context_file": input_context_file,
  517. "q_file": input_q_file,
  518. },
  519. q_with_context=q_with_context,
  520. q_context=q_context,
  521. q=q,
  522. log_dir=log_dir,
  523. log_url=log_url,
  524. )
  525. # 执行渐进式探索
  526. optimization_result = await progressive_exploration(run_context, max_levels=max_levels)
  527. # 格式化输出
  528. final_output = format_output(optimization_result, run_context)
  529. print(f"\n{'='*60}")
  530. print("最终结果")
  531. print(f"{'='*60}")
  532. print(final_output)
  533. # 保存结果
  534. run_context.optimization_result = optimization_result
  535. run_context.final_output = final_output
  536. # 记录最终输出步骤(保存完整的结果详情)
  537. qualified_results = optimization_result.get("results", [])
  538. add_step(run_context, "生成最终结果", "final_result", {
  539. "success": optimization_result["success"],
  540. "message": optimization_result["message"],
  541. "qualified_query_count": len(qualified_results),
  542. "qualified_queries": [r["query"] for r in qualified_results], # 保存所有合格query
  543. "qualified_results_detail": [ # 保存完整的评估详情
  544. {
  545. "rank": idx + 1,
  546. "query": r["query"],
  547. "from_candidate": r["from_candidate"],
  548. "intent_match": r["intent_match"],
  549. "relevance_score": r["relevance_score"],
  550. "reason": r["reason"]
  551. }
  552. for idx, r in enumerate(qualified_results)
  553. ],
  554. "final_output": final_output
  555. })
  556. # 保存 RunContext 到 log_dir
  557. os.makedirs(run_context.log_dir, exist_ok=True)
  558. context_file_path = os.path.join(run_context.log_dir, "run_context.json")
  559. with open(context_file_path, "w", encoding="utf-8") as f:
  560. json.dump(run_context.model_dump(), f, ensure_ascii=False, indent=2)
  561. print(f"\nRunContext saved to: {context_file_path}")
  562. # 保存步骤化日志(更直观的格式)
  563. steps_file_path = os.path.join(run_context.log_dir, "steps.json")
  564. with open(steps_file_path, "w", encoding="utf-8") as f:
  565. json.dump(run_context.steps, f, ensure_ascii=False, indent=2)
  566. print(f"Steps log saved to: {steps_file_path}")
  567. # 生成步骤化的可读文本日志
  568. steps_text_path = os.path.join(run_context.log_dir, "steps.md")
  569. with open(steps_text_path, "w", encoding="utf-8") as f:
  570. f.write(f"# 执行步骤日志\n\n")
  571. f.write(f"**原始问题**: {run_context.q}\n\n")
  572. f.write(f"**执行版本**: {run_context.version}\n\n")
  573. f.write(f"**总步骤数**: {len(run_context.steps)}\n\n")
  574. f.write("---\n\n")
  575. for step in run_context.steps:
  576. f.write(f"## 步骤 {step['step_number']}: {step['step_name']}\n\n")
  577. f.write(f"**类型**: `{step['step_type']}`\n\n")
  578. f.write(f"**时间**: {step['timestamp']}\n\n")
  579. # 根据不同类型格式化数据
  580. if step['step_type'] == 'keyword_extraction':
  581. f.write(f"**提取的关键词**: {', '.join(step['data']['keywords'])}\n\n")
  582. f.write(f"**提取理由**: {step['data']['reasoning']}\n\n")
  583. elif step['step_type'] == 'level_exploration':
  584. f.write(f"**探索层级**: Level {step['data']['level']}\n\n")
  585. f.write(f"**输入query数量**: {step['data']['query_count']}\n\n")
  586. f.write(f"**总推荐词数**: {step['data']['total_suggestions']}\n\n")
  587. f.write(f"**探索的query**: {', '.join(step['data']['input_queries'])}\n\n")
  588. elif step['step_type'] == 'level_analysis':
  589. f.write(f"**关键发现**: {step['data']['key_findings']}\n\n")
  590. f.write(f"**有价值信号数**: {step['data']['promising_signals_count']}\n\n")
  591. f.write(f"**是否评估**: {step['data']['should_evaluate_now']}\n\n")
  592. if step['data']['should_evaluate_now']:
  593. f.write(f"**候选query**: {', '.join(step['data']['candidates_to_evaluate'])}\n\n")
  594. else:
  595. f.write(f"**下一层探索**: {', '.join(step['data']['next_combinations'])}\n\n")
  596. elif step['step_type'] == 'candidate_evaluation':
  597. f.write(f"**评估候选数**: {step['data']['candidate_count']}\n\n")
  598. f.write(f"**候选query**: {', '.join(step['data']['candidates'])}\n\n")
  599. f.write(f"**总评估数**: {step['data']['total_evaluations']}\n\n")
  600. elif step['step_type'] == 'final_result':
  601. f.write(f"**执行状态**: {'✅ 成功' if step['data']['success'] else '❌ 失败'}\n\n")
  602. f.write(f"**结果消息**: {step['data']['message']}\n\n")
  603. f.write(f"**合格query数量**: {step['data']['qualified_query_count']}\n\n")
  604. # 显示详细的评估结果
  605. if step['data'].get('qualified_results_detail'):
  606. f.write(f"### 合格的query详情\n\n")
  607. for result in step['data']['qualified_results_detail']:
  608. f.write(f"#### {result['rank']}. {result['query']}\n\n")
  609. f.write(f"- **来自候选**: {result['from_candidate']}\n")
  610. f.write(f"- **意图匹配**: {'✅ 是' if result['intent_match'] else '❌ 否'}\n")
  611. f.write(f"- **相关性分数**: {result['relevance_score']:.2f}\n")
  612. f.write(f"- **评估理由**: {result['reason']}\n\n")
  613. elif step['data']['qualified_queries']:
  614. # 兼容旧格式(如果没有详情)
  615. f.write(f"**合格的query列表**:\n")
  616. for idx, q in enumerate(step['data']['qualified_queries'], 1):
  617. f.write(f" {idx}. {q}\n")
  618. f.write("\n")
  619. f.write(f"### 完整输出\n\n```\n{step['data']['final_output']}\n```\n\n")
  620. f.write("---\n\n")
  621. print(f"Steps markdown saved to: {steps_text_path}")
  622. if __name__ == "__main__":
  623. parser = argparse.ArgumentParser(description="搜索query优化工具 - v6.1 意图匹配+相关性评分版")
  624. parser.add_argument(
  625. "--input-dir",
  626. type=str,
  627. default="input/简单扣图",
  628. help="输入目录路径,默认: input/简单扣图"
  629. )
  630. parser.add_argument(
  631. "--max-levels",
  632. type=int,
  633. default=4,
  634. help="最大探索层数,默认: 4"
  635. )
  636. args = parser.parse_args()
  637. asyncio.run(main(args.input_dir, max_levels=args.max_levels))