# sug_v6_0_progressive_exploration.py — progressive breadth-first search-query exploration.
import argparse
import asyncio
import json
import os
from datetime import datetime
from typing import Literal

from pydantic import BaseModel, Field

from agents import Agent, Runner
from lib.my_trace import set_trace
from lib.utils import read_file_as_string
from script.search_recommendations.xiaohongshu_search_recommendations import XiaohongshuSearchRecommendations
  12. class RunContext(BaseModel):
  13. version: str = Field(..., description="当前运行的脚本版本(文件名)")
  14. input_files: dict[str, str] = Field(..., description="输入文件路径映射")
  15. q_with_context: str
  16. q_context: str
  17. q: str
  18. log_url: str
  19. log_dir: str
  20. # 探索阶段记录
  21. keywords: list[str] | None = Field(default=None, description="提取的关键词")
  22. exploration_levels: list[dict] = Field(default_factory=list, description="每一层的探索结果")
  23. level_analyses: list[dict] = Field(default_factory=list, description="每一层的主Agent分析")
  24. # 最终结果
  25. final_candidates: list[str] | None = Field(default=None, description="最终选出的候选query")
  26. evaluation_results: list[dict] | None = Field(default=None, description="候选query的评估结果")
  27. optimization_result: dict | None = Field(default=None, description="最终优化结果对象")
  28. final_output: str | None = Field(default=None, description="最终输出结果(格式化文本)")
  29. # ============================================================================
  30. # Agent 1: 关键词提取专家
  31. # ============================================================================
  32. keyword_extraction_instructions = """
  33. 你是关键词提取专家。给定一个搜索问题(含上下文),提取出**最细粒度的关键概念**。
  34. ## 提取原则
  35. 1. **细粒度优先**:拆分成最小的有意义单元
  36. - 不要保留完整的长句
  37. - 拆分成独立的、有搜索意义的词或短语
  38. 2. **保留核心维度**:
  39. - 地域/对象
  40. - 时间
  41. - 行为/意图:获取、教程、推荐、如何等
  42. - 主题/领域
  43. - 质量/属性
  44. 3. **去掉无意义的虚词**:的、吗、呢等
  45. 4. **保留领域专有词**:不要过度拆分专业术语
  46. - 如果是常见的组合词,保持完整
  47. ## 输出要求
  48. 输出关键词列表,按重要性排序(最核心的在前)。
  49. """.strip()
  50. class KeywordList(BaseModel):
  51. """关键词列表"""
  52. keywords: list[str] = Field(..., description="提取的关键词,按重要性排序")
  53. reasoning: str = Field(..., description="提取理由")
  54. keyword_extractor = Agent[None](
  55. name="关键词提取专家",
  56. instructions=keyword_extraction_instructions,
  57. output_type=KeywordList,
  58. )
  59. # ============================================================================
  60. # Agent 2: 层级探索分析专家
  61. # ============================================================================
  62. level_analysis_instructions = """
  63. 你是搜索空间探索分析专家。基于当前层级的探索结果,决定下一步行动。
  64. ## 你的任务
  65. 分析当前已探索的词汇空间,判断:
  66. 1. **发现了什么有价值的信号?**
  67. 2. **是否已经可以评估候选了?**
  68. 3. **如果还不够,下一层应该探索什么组合?**
  69. ## 分析维度
  70. ### 1. 信号识别(最重要)
  71. 看推荐词里**出现了什么主题**:
  72. **关键问题:**
  73. - 哪些推荐词**最接近原始需求**?
  74. - 哪些推荐词**揭示了有价值的方向**(即使不完全匹配)?
  75. - 哪些推荐词可以作为**下一层探索的桥梁**?
  76. - 系统对哪些概念理解得好?哪些理解偏了?
  77. ### 2. 组合策略
  78. 基于发现的信号,设计下一层探索:
  79. **组合类型:**
  80. a) **关键词直接组合**
  81. - 两个关键词组合成新query
  82. b) **利用推荐词作为桥梁**(重要!)
  83. - 发现某个推荐词很有价值 → 直接探索这个推荐词
  84. - 或在推荐词基础上加其他关键词
  85. c) **跨层级组合**
  86. - 结合多层发现的有价值推荐词
  87. - 组合成更复杂的query
  88. ### 3. 停止条件
  89. **何时可以评估候选?**
  90. 满足以下之一:
  91. - 推荐词中出现了**明确包含原始需求多个核心要素的query**
  92. - 已经探索到**足够复杂的组合**(3-4个关键词),且推荐词相关
  93. - 探索了**3-4层**,信息已经足够丰富
  94. **何时继续探索?**
  95. - 当前推荐词太泛,没有接近原始需求
  96. - 发现了有价值的信号,但需要进一步组合验证
  97. - 层数还少(< 3层)
  98. ## 输出要求
  99. ### 1. key_findings
  100. 总结当前层发现的关键信息,包括:
  101. - 哪些推荐词最有价值?
  102. - 系统对哪些概念理解得好/不好?
  103. - 发现了什么意外的方向?
  104. ### 2. promising_signals
  105. 列出最有价值的推荐词(来自任何已探索的query),每个说明为什么有价值
  106. ### 3. should_evaluate_now
  107. 是否已经可以开始评估候选了?true/false
  108. ### 4. candidates_to_evaluate
  109. 如果should_evaluate_now=true,列出应该评估的候选query
  110. - 可以是推荐词
  111. - 可以是自己构造的组合
  112. ### 5. next_combinations
  113. 如果should_evaluate_now=false,列出下一层应该探索的query组合
  114. ### 6. reasoning
  115. 详细的推理过程
  116. ## 重要原则
  117. 1. **不要过早评估**:至少探索2层,除非第一层就发现了完美匹配
  118. 2. **充分利用推荐词**:推荐词是系统给的提示,要善用
  119. 3. **保持探索方向的多样性**:不要只盯着一个方向
  120. 4. **识别死胡同**:如果某个方向的推荐词一直不相关,果断放弃
  121. """.strip()
  122. class PromisingSignal(BaseModel):
  123. """有价值的推荐词信号"""
  124. query: str = Field(..., description="推荐词")
  125. from_level: int = Field(..., description="来自哪一层")
  126. reason: str = Field(..., description="为什么有价值")
  127. class LevelAnalysis(BaseModel):
  128. """层级分析结果"""
  129. key_findings: str = Field(..., description="当前层的关键发现")
  130. promising_signals: list[PromisingSignal] = Field(..., description="有价值的推荐词信号")
  131. should_evaluate_now: bool = Field(..., description="是否应该开始评估候选")
  132. candidates_to_evaluate: list[str] = Field(default_factory=list, description="如果should_evaluate_now=true,要评估的候选query列表")
  133. next_combinations: list[str] = Field(default_factory=list, description="如果should_evaluate_now=false,下一层要探索的query组合")
  134. reasoning: str = Field(..., description="详细的推理过程")
  135. level_analyzer = Agent[None](
  136. name="层级探索分析专家",
  137. instructions=level_analysis_instructions,
  138. output_type=LevelAnalysis,
  139. )
  140. # ============================================================================
  141. # Agent 3: 评估专家(复用v5_3的评估逻辑)
  142. # ============================================================================
  143. eval_instructions = """
  144. 你是搜索query评估专家。给定原始问题和推荐query,评估三个分数。
  145. ## 评估目标
  146. 用这个推荐query搜索,能否找到满足原始需求的内容?
  147. ## 三层评分
  148. ### 1. essence_score(本质/意图)= 0 或 1
  149. 推荐query的本质/意图是否与原问题一致?
  150. **判断标准:**
  151. - 原问题的核心意图是什么?(找方法、找教程、找作品、找工具、找资源等)
  152. - 推荐词是否明确表达了相同的意图?
  153. **评分原则:**
  154. - 1 = 本质一致,推荐词**明确表达**相同意图
  155. - 0 = 本质改变或**不够明确**
  156. ### 2. hard_score(硬性约束)= 0 或 1
  157. 在本质一致的前提下,是否满足所有硬性约束?
  158. **硬性约束**:地域、时间、对象、工具等客观可验证的限定
  159. **评分:**
  160. - 1 = 所有硬性约束都满足
  161. - 0 = 任一硬性约束不满足
  162. ### 3. soft_score(软性修饰)= 0-1
  163. 软性修饰词(质量、特色、美观等主观评价)保留了多少?
  164. **评分参考:**
  165. - 1.0 = 完整保留
  166. - 0.7-0.9 = 保留核心
  167. - 0.4-0.6 = 部分丢失
  168. - 0-0.3 = 大量丢失
  169. ## 注意
  170. - essence=0 直接拒绝,不管hard/soft多高
  171. - essence=1, hard=0 也要拒绝
  172. - essence=1, hard=1 才看soft_score
  173. """.strip()
  174. class EvaluationFeedback(BaseModel):
  175. """评估反馈模型 - 三层评分"""
  176. essence_score: Literal[0, 1] = Field(..., description="本质/意图匹配度,0或1")
  177. hard_score: Literal[0, 1] = Field(..., description="硬性约束匹配度,0或1")
  178. soft_score: float = Field(..., description="软性修饰完整度,0-1")
  179. reason: str = Field(..., description="评估理由")
  180. evaluator = Agent[None](
  181. name="评估专家",
  182. instructions=eval_instructions,
  183. output_type=EvaluationFeedback,
  184. )
  185. # ============================================================================
  186. # 核心函数
  187. # ============================================================================
  188. async def extract_keywords(q: str) -> KeywordList:
  189. """提取关键词"""
  190. print("\n正在提取关键词...")
  191. result = await Runner.run(keyword_extractor, q)
  192. keyword_list: KeywordList = result.final_output
  193. print(f"提取的关键词:{keyword_list.keywords}")
  194. print(f"提取理由:{keyword_list.reasoning}")
  195. return keyword_list
  196. async def explore_level(queries: list[str], level_num: int, context: RunContext) -> dict:
  197. """探索一个层级(并发获取所有query的推荐词)"""
  198. print(f"\n{'='*60}")
  199. print(f"Level {level_num} 探索:{len(queries)} 个query")
  200. print(f"{'='*60}")
  201. xiaohongshu_api = XiaohongshuSearchRecommendations()
  202. # 并发获取所有推荐词
  203. async def get_single_sug(query: str):
  204. print(f" 探索: {query}")
  205. suggestions = xiaohongshu_api.get_recommendations(keyword=query)
  206. print(f" → {len(suggestions) if suggestions else 0} 个推荐词")
  207. return {
  208. "query": query,
  209. "suggestions": suggestions or []
  210. }
  211. results = await asyncio.gather(*[get_single_sug(q) for q in queries])
  212. level_data = {
  213. "level": level_num,
  214. "timestamp": datetime.now().isoformat(),
  215. "queries": results
  216. }
  217. context.exploration_levels.append(level_data)
  218. return level_data
  219. async def analyze_level(level_data: dict, all_levels: list[dict], original_question: str, context: RunContext) -> LevelAnalysis:
  220. """分析当前层级,决定下一步"""
  221. print(f"\n正在分析 Level {level_data['level']}...")
  222. # 构造输入
  223. analysis_input = f"""
  224. <原始问题>
  225. {original_question}
  226. </原始问题>
  227. <已探索的所有层级>
  228. {json.dumps(all_levels, ensure_ascii=False, indent=2)}
  229. </已探索的所有层级>
  230. <当前层级>
  231. Level {level_data['level']}
  232. {json.dumps(level_data['queries'], ensure_ascii=False, indent=2)}
  233. </当前层级>
  234. 请分析当前探索状态,决定下一步行动。
  235. """
  236. result = await Runner.run(level_analyzer, analysis_input)
  237. analysis: LevelAnalysis = result.final_output
  238. print(f"\n分析结果:")
  239. print(f" 关键发现:{analysis.key_findings}")
  240. print(f" 有价值的信号:{len(analysis.promising_signals)} 个")
  241. print(f" 是否评估:{analysis.should_evaluate_now}")
  242. if analysis.should_evaluate_now:
  243. print(f" 候选query:{analysis.candidates_to_evaluate}")
  244. else:
  245. print(f" 下一层探索:{analysis.next_combinations}")
  246. # 保存分析结果
  247. context.level_analyses.append({
  248. "level": level_data['level'],
  249. "timestamp": datetime.now().isoformat(),
  250. "analysis": analysis.model_dump()
  251. })
  252. return analysis
  253. async def evaluate_candidates(candidates: list[str], original_question: str, context: RunContext) -> list[dict]:
  254. """评估候选query"""
  255. print(f"\n{'='*60}")
  256. print(f"评估 {len(candidates)} 个候选query")
  257. print(f"{'='*60}")
  258. xiaohongshu_api = XiaohongshuSearchRecommendations()
  259. async def evaluate_single_candidate(candidate: str):
  260. print(f"\n评估候选:{candidate}")
  261. # 1. 获取推荐词
  262. suggestions = xiaohongshu_api.get_recommendations(keyword=candidate)
  263. print(f" 获取到 {len(suggestions) if suggestions else 0} 个推荐词")
  264. if not suggestions:
  265. return {
  266. "candidate": candidate,
  267. "suggestions": [],
  268. "evaluations": []
  269. }
  270. # 2. 评估每个推荐词
  271. async def eval_single_sug(sug: str):
  272. eval_input = f"""
  273. <原始问题>
  274. {original_question}
  275. </原始问题>
  276. <待评估的推荐query>
  277. {sug}
  278. </待评估的推荐query>
  279. 请评估该推荐query的三个分数:
  280. 1. essence_score: 本质/意图是否一致(0或1)
  281. 2. hard_score: 硬性约束是否满足(0或1)
  282. 3. soft_score: 软性修饰保留程度(0-1)
  283. 4. reason: 详细的评估理由
  284. """
  285. result = await Runner.run(evaluator, eval_input)
  286. evaluation: EvaluationFeedback = result.final_output
  287. return {
  288. "query": sug,
  289. "essence_score": evaluation.essence_score,
  290. "hard_score": evaluation.hard_score,
  291. "soft_score": evaluation.soft_score,
  292. "reason": evaluation.reason,
  293. }
  294. evaluations = await asyncio.gather(*[eval_single_sug(s) for s in suggestions])
  295. return {
  296. "candidate": candidate,
  297. "suggestions": suggestions,
  298. "evaluations": evaluations
  299. }
  300. results = await asyncio.gather(*[evaluate_single_candidate(c) for c in candidates])
  301. context.evaluation_results = results
  302. return results
  303. def find_qualified_queries(evaluation_results: list[dict], min_soft_score: float = 0.7) -> list[dict]:
  304. """查找所有合格的query"""
  305. all_qualified = []
  306. for result in evaluation_results:
  307. for eval_item in result.get("evaluations", []):
  308. if (eval_item['essence_score'] == 1
  309. and eval_item['hard_score'] == 1
  310. and eval_item['soft_score'] >= min_soft_score):
  311. all_qualified.append({
  312. "from_candidate": result["candidate"],
  313. **eval_item
  314. })
  315. # 按soft_score降序排列
  316. return sorted(all_qualified, key=lambda x: x['soft_score'], reverse=True)
  317. # ============================================================================
  318. # 主流程
  319. # ============================================================================
  320. async def progressive_exploration(context: RunContext, max_levels: int = 4) -> dict:
  321. """
  322. 渐进式广度探索流程
  323. Args:
  324. context: 运行上下文
  325. max_levels: 最大探索层数,默认4
  326. 返回格式:
  327. {
  328. "success": True/False,
  329. "results": [...],
  330. "message": "..."
  331. }
  332. """
  333. # 阶段1:提取关键词(从原始问题提取)
  334. keyword_result = await extract_keywords(context.q)
  335. context.keywords = keyword_result.keywords
  336. # 阶段2:渐进式探索
  337. current_level = 1
  338. # Level 1:单个关键词
  339. level_1_queries = context.keywords[:7] # 限制最多7个关键词
  340. level_1_data = await explore_level(level_1_queries, current_level, context)
  341. # 分析Level 1
  342. analysis_1 = await analyze_level(level_1_data, context.exploration_levels, context.q, context)
  343. if analysis_1.should_evaluate_now:
  344. # 直接评估
  345. eval_results = await evaluate_candidates(analysis_1.candidates_to_evaluate, context.q, context)
  346. qualified = find_qualified_queries(eval_results, min_soft_score=0.7)
  347. if qualified:
  348. return {
  349. "success": True,
  350. "results": qualified,
  351. "message": f"Level 1 即找到 {len(qualified)} 个合格query"
  352. }
  353. # Level 2 及以后:迭代探索
  354. for level_num in range(2, max_levels + 1):
  355. # 获取上一层的分析结果
  356. prev_analysis: LevelAnalysis = context.level_analyses[-1]["analysis"]
  357. prev_analysis = LevelAnalysis(**prev_analysis) # 转回对象
  358. if not prev_analysis.next_combinations:
  359. print(f"\nLevel {level_num-1} 分析后无需继续探索")
  360. break
  361. # 探索当前层
  362. level_data = await explore_level(prev_analysis.next_combinations, level_num, context)
  363. # 分析当前层
  364. analysis = await analyze_level(level_data, context.exploration_levels, context.q, context)
  365. if analysis.should_evaluate_now:
  366. # 评估候选
  367. eval_results = await evaluate_candidates(analysis.candidates_to_evaluate, context.q, context)
  368. qualified = find_qualified_queries(eval_results, min_soft_score=0.7)
  369. if qualified:
  370. return {
  371. "success": True,
  372. "results": qualified,
  373. "message": f"Level {level_num} 找到 {len(qualified)} 个合格query"
  374. }
  375. # 所有层探索完,降低标准
  376. print(f"\n{'='*60}")
  377. print(f"探索完 {max_levels} 层,降低标准(soft_score >= 0.5)")
  378. print(f"{'='*60}")
  379. if context.evaluation_results:
  380. acceptable = find_qualified_queries(context.evaluation_results, min_soft_score=0.5)
  381. if acceptable:
  382. return {
  383. "success": True,
  384. "results": acceptable,
  385. "message": f"找到 {len(acceptable)} 个可接受query(soft_score >= 0.5)"
  386. }
  387. # 完全失败
  388. return {
  389. "success": False,
  390. "results": [],
  391. "message": "探索完所有层级,未找到合格的推荐词"
  392. }
  393. # ============================================================================
  394. # 输出格式化
  395. # ============================================================================
  396. def format_output(optimization_result: dict, context: RunContext) -> str:
  397. """格式化输出结果"""
  398. results = optimization_result.get("results", [])
  399. output = f"原始问题:{context.q}\n"
  400. output += f"提取的关键词:{', '.join(context.keywords or [])}\n"
  401. output += f"探索层数:{len(context.exploration_levels)}\n"
  402. output += f"状态:{optimization_result['message']}\n\n"
  403. if optimization_result["success"] and results:
  404. output += "合格的推荐query(按soft_score降序):\n"
  405. for i, result in enumerate(results, 1):
  406. output += f"\n{i}. {result['query']}\n"
  407. output += f" - 来自候选:{result['from_candidate']}\n"
  408. output += f" - 本质匹配度:{result['essence_score']} (1=本质一致)\n"
  409. output += f" - 硬性约束匹配度:{result['hard_score']} (1=所有约束满足)\n"
  410. output += f" - 软性修饰完整度:{result['soft_score']:.2f} (0-1)\n"
  411. output += f" - 评估理由:{result['reason']}\n"
  412. else:
  413. output += "结果:未找到合格推荐query\n"
  414. if context.level_analyses:
  415. last_analysis = context.level_analyses[-1]["analysis"]
  416. output += f"\n最后一层分析:\n{last_analysis.get('key_findings', 'N/A')}\n"
  417. return output.strip()
  418. # ============================================================================
  419. # 主函数
  420. # ============================================================================
  421. async def main(input_dir: str, max_levels: int = 4):
  422. current_time, log_url = set_trace()
  423. # 从目录中读取固定文件名
  424. input_context_file = os.path.join(input_dir, 'context.md')
  425. input_q_file = os.path.join(input_dir, 'q.md')
  426. q_context = read_file_as_string(input_context_file)
  427. q = read_file_as_string(input_q_file)
  428. q_with_context = f"""
  429. <需求上下文>
  430. {q_context}
  431. </需求上下文>
  432. <当前问题>
  433. {q}
  434. </当前问题>
  435. """.strip()
  436. # 获取当前文件名作为版本
  437. version = os.path.basename(__file__)
  438. version_name = os.path.splitext(version)[0]
  439. # 日志保存目录
  440. log_dir = os.path.join(input_dir, "output", version_name, current_time)
  441. run_context = RunContext(
  442. version=version,
  443. input_files={
  444. "input_dir": input_dir,
  445. "context_file": input_context_file,
  446. "q_file": input_q_file,
  447. },
  448. q_with_context=q_with_context,
  449. q_context=q_context,
  450. q=q,
  451. log_dir=log_dir,
  452. log_url=log_url,
  453. )
  454. # 执行渐进式探索
  455. optimization_result = await progressive_exploration(run_context, max_levels=max_levels)
  456. # 格式化输出
  457. final_output = format_output(optimization_result, run_context)
  458. print(f"\n{'='*60}")
  459. print("最终结果")
  460. print(f"{'='*60}")
  461. print(final_output)
  462. # 保存结果
  463. run_context.optimization_result = optimization_result
  464. run_context.final_output = final_output
  465. # 保存 RunContext 到 log_dir
  466. os.makedirs(run_context.log_dir, exist_ok=True)
  467. context_file_path = os.path.join(run_context.log_dir, "run_context.json")
  468. with open(context_file_path, "w", encoding="utf-8") as f:
  469. json.dump(run_context.model_dump(), f, ensure_ascii=False, indent=2)
  470. print(f"\nRunContext saved to: {context_file_path}")
  471. if __name__ == "__main__":
  472. parser = argparse.ArgumentParser(description="搜索query优化工具 - 渐进式广度探索版")
  473. parser.add_argument(
  474. "--input-dir",
  475. type=str,
  476. default="input/简单扣图",
  477. help="输入目录路径,默认: input/简单扣图"
  478. )
  479. parser.add_argument(
  480. "--max-levels",
  481. type=int,
  482. default=4,
  483. help="最大探索层数,默认: 4"
  484. )
  485. args = parser.parse_args()
  486. asyncio.run(main(args.input_dir, max_levels=args.max_levels))