import asyncio
import json
import os
import argparse
from datetime import datetime

from agents import Agent, Runner
from pydantic import BaseModel, Field

from lib.my_trace import set_trace
from lib.utils import read_file_as_string
from lib.client import get_model
from script.search_recommendations.xiaohongshu_search_recommendations import XiaohongshuSearchRecommendations
from script.search.xiaohongshu_search import XiaohongshuSearch

# Model used by every agent defined in this file
MODEL_NAME = "google/gemini-2.5-flash"

# ============================================================================
# Data models
# ============================================================================
class QueryState(BaseModel):
    """Tracks the state of a single query."""
    query: str
    level: int  # current depth in the query tree
    no_suggestion_rounds: int = 0  # consecutive rounds without any suggestion
    relevance_score: float = 0.0  # relevance to the original need
    parent_query: str | None = None  # parent query
    strategy: str | None = None  # generation strategy: direct_sug, rewrite, add_word
    is_terminated: bool = False  # whether this query is terminated (no longer processed)

class WordLibrary(BaseModel):
    """Dynamic word library built from segmentation results."""
    words: set[str] = Field(default_factory=set)
    word_sources: dict[str, str] = Field(default_factory=dict)  # word -> source (a note_id or "initial")

    def add_word(self, word: str, source: str = "unknown"):
        """Add a single word to the library."""
        if word and word.strip():
            word = word.strip()
            self.words.add(word)
            if word not in self.word_sources:
                self.word_sources[word] = source

    def add_words(self, words: list[str], source: str = "unknown"):
        """Add words in bulk."""
        for word in words:
            self.add_word(word, source)

    def get_unused_word(self, current_query: str) -> str | None:
        """Return a library word that does not appear in the current query."""
        for word in self.words:
            if word not in current_query:
                return word
        return None

    def model_dump(self, **kwargs):
        """Serialize to a plain dict (sets are not JSON-serializable).

        Accepts **kwargs so the override stays compatible with pydantic's
        model_dump signature.
        """
        return {
            "words": list(self.words),
            "word_sources": self.word_sources
        }
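
# A minimal usage sketch of WordLibrary (illustrative only, not executed here;
# the example words are hypothetical):
#
#     lib = WordLibrary()
#     lib.add_words(["扣图", "教程"], source="initial")
#     lib.get_unused_word("扣图怎么做")  # -> "教程" (the only library word absent from the query)
#     lib.model_dump()                   # -> {"words": [...], "word_sources": {...}}
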
class RunContext(BaseModel):
    """Run-wide context shared across the pipeline."""
    version: str
    input_files: dict[str, str]
    q_with_context: str
    q_context: str
    q: str
    log_url: str
    log_dir: str
    # Iteration state
    word_library: dict = Field(default_factory=dict)  # stored as a dict because sets are not JSON-serializable
    query_states: list[dict] = Field(default_factory=list)
    steps: list[dict] = Field(default_factory=list)
    # Query evolution graph: records how queries derive from one another
    query_graph: dict = Field(default_factory=dict)
    # Final results
    satisfied_notes: list[dict] = Field(default_factory=list)
    final_output: str | None = None

# ============================================================================
# Agent definitions
# ============================================================================

# Agent 1: word-segmentation expert
class WordSegmentation(BaseModel):
    """Word-segmentation result."""
    words: list[str] = Field(..., description="分词结果列表")
    reasoning: str = Field(..., description="分词理由")

word_segmentation_instructions = """
你是分词专家。给定一个query,将其拆分成有意义的最小单元。
## 分词原则
1. 保留有搜索意义的词汇
2. 拆分成独立的概念
3. 保留专业术语的完整性
4. 去除虚词(的、吗、呢等)
## 输出要求
返回分词列表和分词理由。
""".strip()

word_segmenter = Agent[None](
    name="分词专家",
    instructions=word_segmentation_instructions,
    model=get_model(MODEL_NAME),
    output_type=WordSegmentation,
)
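
# Every agent defined below is invoked through the same pattern used throughout
# the pipeline: pass the agent plus an input string to Runner.run, then read the
# typed result from final_output. Sketch (the input string is hypothetical):
#
#     result = await Runner.run(word_segmenter, "如何快速扣图")
#     segmentation: WordSegmentation = result.final_output  # validated model instance
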
# Agent 2: query-relevance evaluation expert
class RelevanceEvaluation(BaseModel):
    """Relevance-evaluation result."""
    relevance_score: float = Field(..., description="相关性分数 0-1")
    is_improved: bool = Field(..., description="是否比之前更好")
    reason: str = Field(..., description="评估理由")

relevance_evaluation_instructions = """
你是Query相关度评估专家。
## 任务
评估当前query与原始需求的匹配程度。
## 评估标准
- 主题相关性
- 要素覆盖度
- 意图匹配度
## 输出
- relevance_score: 0-1的相关性分数
- is_improved: 如果提供了previous_score,判断是否有提升
- reason: 详细理由
""".strip()

relevance_evaluator = Agent[None](
    name="Query相关度评估专家",
    instructions=relevance_evaluation_instructions,
    model=get_model(MODEL_NAME),
    output_type=RelevanceEvaluation,
)

# Agent 3: query-rewrite expert
class QueryRewrite(BaseModel):
    """Query-rewrite result."""
    rewritten_query: str = Field(..., description="改写后的query")
    rewrite_type: str = Field(..., description="改写类型:abstract或synonym")
    reasoning: str = Field(..., description="改写理由")

query_rewrite_instructions = """
你是Query改写专家。
## 改写策略
1. **向上抽象**:将具体概念泛化到更高层次
   - 例:iPhone 13 → 智能手机
2. **同义改写**:使用同义词或相关表达
   - 例:购买 → 入手、获取
## 输出要求
返回改写后的query、改写类型和理由。
""".strip()

query_rewriter = Agent[None](
    name="Query改写专家",
    instructions=query_rewrite_instructions,
    model=get_model(MODEL_NAME),
    output_type=QueryRewrite,
)

# Agent 4: word-insertion placement expert
class WordInsertion(BaseModel):
    """Word-insertion result."""
    new_query: str = Field(..., description="加词后的新query")
    insertion_position: str = Field(..., description="插入位置描述")
    reasoning: str = Field(..., description="插入理由")

word_insertion_instructions = """
你是加词位置评估专家。
## 任务
将新词加到当前query的最合适位置,保持语义通顺。
## 原则
1. 保持语法正确
2. 语义连贯
3. 符合搜索习惯
## 输出
返回新query、插入位置描述和理由。
""".strip()

word_inserter = Agent[None](
    name="加词位置评估专家",
    instructions=word_insertion_instructions,
    model=get_model(MODEL_NAME),
    output_type=WordInsertion,
)

# Agent 5: result-match evaluation expert
class ResultEvaluation(BaseModel):
    """Result-evaluation output."""
    match_level: str = Field(..., description="匹配等级:satisfied, partial, unsatisfied")
    relevance_score: float = Field(..., description="相关性分数 0-1")
    missing_aspects: list[str] = Field(default_factory=list, description="缺失的方面")
    reason: str = Field(..., description="评估理由")

result_evaluation_instructions = """
你是Result匹配度评估专家。
## 任务
评估搜索结果(帖子)与原始需求的匹配程度。
## 评估等级
1. **satisfied**: 完全满足需求
2. **partial**: 部分满足,但有缺失
3. **unsatisfied**: 基本不满足
## 输出要求
- match_level: 匹配等级
- relevance_score: 相关性分数
- missing_aspects: 如果是partial,列出缺失的方面
- reason: 详细理由
""".strip()

result_evaluator = Agent[None](
    name="Result匹配度评估专家",
    instructions=result_evaluation_instructions,
    model=get_model(MODEL_NAME),
    output_type=ResultEvaluation,
)

# Agent 6: query-improvement expert (driven by missing aspects)
class QueryImprovement(BaseModel):
    """Query-improvement result."""
    improved_query: str = Field(..., description="改造后的query")
    added_aspects: list[str] = Field(..., description="添加的方面")
    reasoning: str = Field(..., description="改造理由")

query_improvement_instructions = """
你是Query改造专家。
## 任务
根据搜索结果的缺失部分,改造query使其包含这些内容。
## 原则
1. 针对性补充缺失方面
2. 保持query简洁
3. 符合搜索习惯
## 输出
返回改造后的query、添加的方面和理由。
""".strip()

query_improver = Agent[None](
    name="Query改造专家",
    instructions=query_improvement_instructions,
    model=get_model(MODEL_NAME),
    output_type=QueryImprovement,
)

# Agent 7: keyword-extraction expert (currently referenced only by the disabled
# keyword-feedback step in process_search_results)
class KeywordExtraction(BaseModel):
    """Keyword-extraction result."""
    keywords: list[str] = Field(..., description="提取的关键词列表")
    reasoning: str = Field(..., description="提取理由")

keyword_extraction_instructions = """
你是关键词提取专家。
## 任务
从帖子标题和描述中提取核心关键词。
## 提取原则
1. 提取有搜索价值的词汇
2. 去除虚词和通用词
3. 保留专业术语
4. 提取3-10个关键词
## 输出
返回关键词列表和提取理由。
""".strip()

keyword_extractor = Agent[None](
    name="关键词提取专家",
    instructions=keyword_extraction_instructions,
    model=get_model(MODEL_NAME),
    output_type=KeywordExtraction,
)

# ============================================================================
# Helper functions
# ============================================================================
def add_step(context: RunContext, step_name: str, step_type: str, data: dict):
    """Append a step record to the run context."""
    step = {
        "step_number": len(context.steps) + 1,
        "step_name": step_name,
        "step_type": step_type,
        "timestamp": datetime.now().isoformat(),
        "data": data
    }
    context.steps.append(step)
    return step

def add_query_to_graph(context: RunContext, query_state: QueryState, iteration: int, evaluation_reason: str = "", is_selected: bool = True):
    """Add a query node to the evolution graph.

    Args:
        context: run context
        query_state: query state
        iteration: iteration number
        evaluation_reason: evaluation reason (optional)
        is_selected: whether the query was selected into the processing queue (default True)
    """
    query_id = query_state.query  # the query text itself serves as the node ID

    # Initialize the graph structure on first use
    if "nodes" not in context.query_graph:
        context.query_graph["nodes"] = {}
        context.query_graph["edges"] = []
        context.query_graph["iterations"] = {}

    # Add the query node (type: query)
    context.query_graph["nodes"][query_id] = {
        "type": "query",
        "query": query_state.query,
        "level": query_state.level,
        "relevance_score": query_state.relevance_score,
        "strategy": query_state.strategy,
        "parent_query": query_state.parent_query,
        "iteration": iteration,
        "is_terminated": query_state.is_terminated,
        "no_suggestion_rounds": query_state.no_suggestion_rounds,
        "evaluation_reason": evaluation_reason,  # why the query was scored this way
        "is_selected": is_selected  # whether it entered the processing queue
    }

    # Add the parent -> child edge
    if query_state.parent_query:
        parent_id = query_state.parent_query
        if parent_id in context.query_graph["nodes"]:
            context.query_graph["edges"].append({
                "from": parent_id,
                "to": query_id,
                "edge_type": "query_to_query",
                "strategy": query_state.strategy,
                "score_improvement": query_state.relevance_score - context.query_graph["nodes"][parent_id]["relevance_score"]
            })

    # Group nodes by iteration
    if iteration not in context.query_graph["iterations"]:
        context.query_graph["iterations"][iteration] = []
    context.query_graph["iterations"][iteration].append(query_id)
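
# For reference, query_graph ends up with roughly this shape (illustrative):
#
#     {
#         "nodes": {"<query text or note_id>": {"type": "query" | "note", ...}},
#         "edges": [{"from": ..., "to": ..., "edge_type": "query_to_query" | "query_to_note", ...}],
#         "iterations": {0: ["<query>", ...], 1: [...]}
#     }
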
def add_note_to_graph(context: RunContext, query: str, note: dict):
    """Add a note node to the evolution graph and link it to the query that found it."""
    note_id = note["note_id"]

    # Initialize the graph structure on first use
    if "nodes" not in context.query_graph:
        context.query_graph["nodes"] = {}
        context.query_graph["edges"] = []
        context.query_graph["iterations"] = {}

    # Add the note node (type: note) with its full metadata
    context.query_graph["nodes"][note_id] = {
        "type": "note",
        "note_id": note_id,
        "title": note["title"],
        "desc": note.get("desc", ""),  # full description, untruncated
        "note_url": note.get("note_url", ""),
        "image_list": note.get("image_list", []),  # image list
        "interact_info": note.get("interact_info", {}),  # interactions (likes, collects, comments, shares)
        "match_level": note["evaluation"]["match_level"],
        "relevance_score": note["evaluation"]["relevance_score"],
        "evaluation_reason": note["evaluation"].get("reason", ""),  # evaluation reason
        "found_by_query": query
    }

    # Add the Query → Note edge
    if query in context.query_graph["nodes"]:
        context.query_graph["edges"].append({
            "from": query,
            "to": note_id,
            "edge_type": "query_to_note",
            "match_level": note["evaluation"]["match_level"],
            "relevance_score": note["evaluation"]["relevance_score"]
        })

def process_note_data(note: dict) -> dict:
    """Normalize a raw note payload returned by the search API."""
    note_card = note.get("note_card", {})
    image_list = note_card.get("image_list", [])
    interact_info = note_card.get("interact_info", {})
    user_info = note_card.get("user", {})
    return {
        "note_id": note.get("id", ""),
        "title": note_card.get("display_title", ""),
        "desc": note_card.get("desc", ""),
        "image_list": image_list,
        "interact_info": {
            "liked_count": interact_info.get("liked_count", 0),
            "collected_count": interact_info.get("collected_count", 0),
            "comment_count": interact_info.get("comment_count", 0),
            "shared_count": interact_info.get("shared_count", 0)
        },
        "user": {
            "nickname": user_info.get("nickname", ""),
            "user_id": user_info.get("user_id", "")
        },
        "type": note_card.get("type", "normal"),
        "note_url": f"https://www.xiaohongshu.com/explore/{note.get('id', '')}"
    }
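
# Based on the accesses above, the raw payload is assumed to look roughly like
# this (the real API response may carry additional fields):
#
#     {
#         "id": "...",
#         "note_card": {
#             "display_title": "...", "desc": "...", "type": "normal",
#             "image_list": [...],
#             "interact_info": {"liked_count": 0, "collected_count": 0, ...},
#             "user": {"nickname": "...", "user_id": "..."}
#         }
#     }
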
# ============================================================================
# Core pipeline functions
# ============================================================================
async def initialize_word_library(original_query: str, context: RunContext) -> WordLibrary:
    """Initialize the word library by segmenting the original query."""
    print("\n[初始化] 创建分词库...")

    # Segment the query with the word-segmentation agent
    result = await Runner.run(word_segmenter, original_query)
    segmentation: WordSegmentation = result.final_output

    word_lib = WordLibrary()
    word_lib.add_words(segmentation.words, source="initial")
    print(f"初始分词库: {list(word_lib.words)}")
    print(f"分词理由: {segmentation.reasoning}")

    # Persist to the run context
    context.word_library = word_lib.model_dump()
    add_step(context, "初始化分词库", "word_library_init", {
        "agent": "分词专家",
        "input": original_query,
        "output": {
            "words": segmentation.words,
            "reasoning": segmentation.reasoning
        },
        "result": {
            "word_library": list(word_lib.words)
        }
    })
    return word_lib

async def evaluate_query_relevance(
    query: str,
    original_need: str,
    previous_score: float | None = None,
    context: RunContext | None = None  # accepted for signature parity; not used in the body
) -> RelevanceEvaluation:
    """Evaluate how well a query matches the original need."""
    eval_input = f"""
<原始需求>
{original_need}
</原始需求>
<当前Query>
{query}
</当前Query>
{"<之前的相关度分数>" + str(previous_score) + "</之前的相关度分数>" if previous_score is not None else ""}
请评估当前query与原始需求的相关度。
"""
    result = await Runner.run(relevance_evaluator, eval_input)
    evaluation: RelevanceEvaluation = result.final_output
    return evaluation

async def process_suggestions(
    query: str,
    query_state: QueryState,
    original_need: str,
    word_lib: WordLibrary,
    context: RunContext,
    xiaohongshu_api: XiaohongshuSearchRecommendations,
    iteration: int
) -> list[QueryState]:
    """Process the suggestion branch; returns the newly generated query states."""
    print(f"\n [Suggestion分支] 处理query: {query}")

    # Collect every agent call made while processing this branch
    agent_calls = []

    # 1. Fetch suggestions
    suggestions = xiaohongshu_api.get_recommendations(keyword=query)
    if not suggestions or len(suggestions) == 0:
        print(f" → 没有获取到suggestion")
        query_state.no_suggestion_rounds += 1
        # Record the step
        add_step(context, f"Suggestion分支 - {query}", "suggestion_branch", {
            "query": query,
            "query_level": query_state.level,
            "suggestions_count": 0,
            "no_suggestion_rounds": query_state.no_suggestion_rounds,
            "new_queries_generated": 0
        })
        return []

    print(f" → 获取到 {len(suggestions)} 个suggestions")
    query_state.no_suggestion_rounds = 0  # reset the counter

    # 2. Evaluate each suggestion
    new_queries = []
    suggestion_evaluations = []
    for sug in suggestions[:5]:  # cap how many suggestions are processed
        # Evaluate the suggestion against the ORIGINAL need (original_need), not the
        # current query, so that generated suggestions stay anchored to the user's core need.
        sug_eval = await evaluate_query_relevance(sug, original_need, query_state.relevance_score, context)
        sug_eval_record = {
            "suggestion": sug,
            "relevance_score": sug_eval.relevance_score,
            "is_improved": sug_eval.is_improved,
            "reason": sug_eval.reason
        }
        suggestion_evaluations.append(sug_eval_record)

        # Create a query state (every suggestion becomes a query node)
        sug_state = QueryState(
            query=sug,
            level=query_state.level + 1,
            relevance_score=sug_eval.relevance_score,
            parent_query=query,
            strategy="direct_sug"
        )

        # Only suggestions that improve on the current query enter the processing queue
        is_selected = sug_eval.is_improved and sug_eval.relevance_score > query_state.relevance_score

        # Add every suggestion to the evolution graph (including unimproved ones)
        add_query_to_graph(
            context,
            sug_state,
            iteration,
            evaluation_reason=sug_eval.reason,
            is_selected=is_selected
        )

        if is_selected:
            print(f" ✓ {sug} (分数: {sug_eval.relevance_score:.2f}, 提升: {sug_eval.is_improved})")
            new_queries.append(sug_state)
        else:
            print(f" ✗ {sug} (分数: {sug_eval.relevance_score:.2f}, 未提升)")

    # 3. Rewrite strategy: abstract upward
    if len(new_queries) < 3:  # not enough direct suggestions; try rewriting
        rewrite_input_abstract = f"""
<当前Query>
{query}
</当前Query>
<改写要求>
类型: abstract (向上抽象)
</改写要求>
请改写这个query。
"""
        result = await Runner.run(query_rewriter, rewrite_input_abstract)
        rewrite: QueryRewrite = result.final_output

        # Record the rewrite agent's input and output
        rewrite_agent_call = {
            "agent": "Query改写专家",
            "action": "向上抽象改写",
            "input": {
                "query": query,
                "rewrite_type": "abstract"
            },
            "output": {
                "rewritten_query": rewrite.rewritten_query,
                "rewrite_type": rewrite.rewrite_type,
                "reasoning": rewrite.reasoning
            }
        }
        agent_calls.append(rewrite_agent_call)

        # Evaluate the rewritten query
        rewrite_eval = await evaluate_query_relevance(rewrite.rewritten_query, original_need, query_state.relevance_score, context)

        # Create the rewritten query state
        new_state = QueryState(
            query=rewrite.rewritten_query,
            level=query_state.level + 1,
            relevance_score=rewrite_eval.relevance_score,
            parent_query=query,
            strategy="rewrite_abstract"
        )

        # Add to the evolution graph whether or not it improved
        add_query_to_graph(
            context,
            new_state,
            iteration,
            evaluation_reason=rewrite_eval.reason,
            is_selected=rewrite_eval.is_improved
        )

        if rewrite_eval.is_improved:
            print(f" ✓ 改写(抽象): {rewrite.rewritten_query} (分数: {rewrite_eval.relevance_score:.2f})")
            new_queries.append(new_state)
        else:
            print(f" ✗ 改写(抽象): {rewrite.rewritten_query} (分数: {rewrite_eval.relevance_score:.2f}, 未提升)")

    # 3.2. Synonym-rewrite strategy
    if len(new_queries) < 4:  # still not enough; try a synonym rewrite
        rewrite_input_synonym = f"""
<当前Query>
{query}
</当前Query>
<改写要求>
类型: synonym (同义改写)
使用同义词或相关表达来改写query,保持语义相同但表达方式不同。
</改写要求>
请改写这个query。
"""
        result = await Runner.run(query_rewriter, rewrite_input_synonym)
        rewrite_syn: QueryRewrite = result.final_output

        # Record the synonym-rewrite agent's input and output
        rewrite_syn_agent_call = {
            "agent": "Query改写专家",
            "action": "同义改写",
            "input": {
                "query": query,
                "rewrite_type": "synonym"
            },
            "output": {
                "rewritten_query": rewrite_syn.rewritten_query,
                "rewrite_type": rewrite_syn.rewrite_type,
                "reasoning": rewrite_syn.reasoning
            }
        }
        agent_calls.append(rewrite_syn_agent_call)

        # Evaluate the rewritten query
        rewrite_syn_eval = await evaluate_query_relevance(rewrite_syn.rewritten_query, original_need, query_state.relevance_score, context)

        # Create the rewritten query state
        new_state = QueryState(
            query=rewrite_syn.rewritten_query,
            level=query_state.level + 1,
            relevance_score=rewrite_syn_eval.relevance_score,
            parent_query=query,
            strategy="rewrite_synonym"
        )

        # Add to the evolution graph whether or not it improved
        add_query_to_graph(
            context,
            new_state,
            iteration,
            evaluation_reason=rewrite_syn_eval.reason,
            is_selected=rewrite_syn_eval.is_improved
        )

        if rewrite_syn_eval.is_improved:
            print(f" ✓ 改写(同义): {rewrite_syn.rewritten_query} (分数: {rewrite_syn_eval.relevance_score:.2f})")
            new_queries.append(new_state)
        else:
            print(f" ✗ 改写(同义): {rewrite_syn.rewritten_query} (分数: {rewrite_syn_eval.relevance_score:.2f}, 未提升)")

    # 4. Add-word strategy
    unused_word = word_lib.get_unused_word(query)
    if unused_word and len(new_queries) < 5:
        insertion_input = f"""
<当前Query>
{query}
</当前Query>
<要添加的词>
{unused_word}
</要添加的词>
请将这个词加到query的最合适位置。
"""
        result = await Runner.run(word_inserter, insertion_input)
        insertion: WordInsertion = result.final_output

        # Record the word-insertion agent's input and output
        insertion_agent_call = {
            "agent": "加词位置评估专家",
            "action": "加词",
            "input": {
                "query": query,
                "word_to_add": unused_word
            },
            "output": {
                "new_query": insertion.new_query,
                "insertion_position": insertion.insertion_position,
                "reasoning": insertion.reasoning
            }
        }
        agent_calls.append(insertion_agent_call)

        # Evaluate the expanded query
        insertion_eval = await evaluate_query_relevance(insertion.new_query, original_need, query_state.relevance_score, context)

        # Create the expanded query state
        new_state = QueryState(
            query=insertion.new_query,
            level=query_state.level + 1,
            relevance_score=insertion_eval.relevance_score,
            parent_query=query,
            strategy="add_word"
        )

        # Add to the evolution graph whether or not it improved
        add_query_to_graph(
            context,
            new_state,
            iteration,
            evaluation_reason=insertion_eval.reason,
            is_selected=insertion_eval.is_improved
        )

        if insertion_eval.is_improved:
            print(f" ✓ 加词: {insertion.new_query} (分数: {insertion_eval.relevance_score:.2f})")
            new_queries.append(new_state)
        else:
            print(f" ✗ 加词: {insertion.new_query} (分数: {insertion_eval.relevance_score:.2f}, 未提升)")

    # Record the full result of this suggestion branch (hierarchical)
    add_step(context, f"Suggestion分支 - {query}", "suggestion_branch", {
        "query": query,
        "query_level": query_state.level,
        "query_relevance": query_state.relevance_score,
        "suggestions_count": len(suggestions),
        "suggestions_evaluated": len(suggestion_evaluations),
        "suggestion_evaluations": suggestion_evaluations[:10],  # keep only the first 10
        "agent_calls": agent_calls,  # detailed record of every agent call
        "new_queries_generated": len(new_queries),
        "new_queries": [{"query": nq.query, "score": nq.relevance_score, "strategy": nq.strategy} for nq in new_queries],
        "no_suggestion_rounds": query_state.no_suggestion_rounds
    })
    return new_queries

async def process_search_results(
    query: str,
    query_state: QueryState,
    original_need: str,
    word_lib: WordLibrary,
    context: RunContext,
    xiaohongshu_search: XiaohongshuSearch,
    relevance_threshold: float,
    iteration: int
) -> tuple[list[dict], list[QueryState]]:
    """
    Process the search-result branch.

    Returns: (notes that satisfy the need, new queries to keep iterating on)
    """
    print(f"\n [Result分支] 搜索query: {query}")

    # Collect every agent call made while processing this branch
    agent_calls = []

    # 1. Check whether the query's relevance clears the threshold
    if query_state.relevance_score < relevance_threshold:
        print(f" ✗ 相关度 {query_state.relevance_score:.2f} 低于门槛 {relevance_threshold},跳过搜索")
        return [], []
    print(f" ✓ 相关度 {query_state.relevance_score:.2f} 达到门槛,执行搜索")

    # 2. Run the search
    try:
        search_result = xiaohongshu_search.search(keyword=query)
        result_str = search_result.get("result", "{}")
        if isinstance(result_str, str):
            result_data = json.loads(result_str)
        else:
            result_data = result_str
        notes = result_data.get("data", {}).get("data", [])
        print(f" → 搜索到 {len(notes)} 个帖子")
    except Exception as e:
        print(f" ✗ 搜索失败: {e}")
        return [], []

    if not notes:
        return [], []

    # 3. Evaluate each note
    satisfied_notes = []
    partial_notes = []
    for note in notes[:10]:  # cap how many notes are evaluated
        note_data = process_note_data(note)
        title = note_data["title"] or ""
        desc = note_data["desc"] or ""

        # Skip notes with neither title nor description
        if not title and not desc:
            continue

        # Evaluate the note
        eval_input = f"""
<原始需求>
{original_need}
</原始需求>
<帖子>
标题: {title}
描述: {desc}
</帖子>
请评估这个帖子与原始需求的匹配程度。
"""
        result = await Runner.run(result_evaluator, eval_input)
        evaluation: ResultEvaluation = result.final_output

        # Record the result-evaluation agent's input and output
        result_eval_agent_call = {
            "agent": "Result匹配度评估专家",
            "action": "评估帖子匹配度",
            "input": {
                "note_id": note_data.get("note_id"),
                "title": title,
                "desc": desc[:200] if len(desc) > 200 else desc  # cap the length
            },
            "output": {
                "match_level": evaluation.match_level,
                "relevance_score": evaluation.relevance_score,
                "missing_aspects": evaluation.missing_aspects,
                "reason": evaluation.reason
            }
        }
        agent_calls.append(result_eval_agent_call)

        note_data["evaluation"] = {
            "match_level": evaluation.match_level,
            "relevance_score": evaluation.relevance_score,
            "missing_aspects": evaluation.missing_aspects,
            "reason": evaluation.reason
        }

        # Add every evaluated note to the evolution graph (satisfied, partial, and unsatisfied)
        add_note_to_graph(context, query, note_data)

        if evaluation.match_level == "satisfied":
            satisfied_notes.append(note_data)
            print(f" ✓ 满足: {title[:30] if len(title) > 30 else title}... (分数: {evaluation.relevance_score:.2f})")
        elif evaluation.match_level == "partial":
            partial_notes.append(note_data)
            print(f" ~ 部分: {title[:30] if len(title) > 30 else title}... (缺失: {', '.join(evaluation.missing_aspects[:2])})")
        else:  # unsatisfied
            print(f" ✗ 不满足: {title[:30] if len(title) > 30 else title}... (分数: {evaluation.relevance_score:.2f})")

    # 4. Satisfied notes: keywords are no longer fed back into the word library
    #    (prevents the library from growing without bound)
    new_queries = []
    if satisfied_notes:
        print(f"\n ✓ 找到 {len(satisfied_notes)} 个满足的帖子,不再提取关键词入库")
        # Keyword extraction is intentionally disabled to keep the word library stable:
        # for note in satisfied_notes[:3]:
        #     extract_input = f"""
        # <帖子>
        # 标题: {note['title']}
        # 描述: {note['desc']}
        # </帖子>
        #
        # 请提取核心关键词。
        # """
        #     result = await Runner.run(keyword_extractor, extract_input)
        #     extraction: KeywordExtraction = result.final_output
        #
        #     # Add new words to the library, tagged with their source
        #     note_id = note.get('note_id', 'unknown')
        #     for keyword in extraction.keywords:
        #         if keyword not in word_lib.words:
        #             word_lib.add_word(keyword, source=f"note:{note_id}")
        #             print(f" + 新词入库: {keyword} (来源: {note_id})")

    # 5. Partially matching notes: rework the query
    if partial_notes and len(satisfied_notes) < 5:  # not enough satisfied notes; improve from partial matches
        print(f"\n 基于 {len(partial_notes)} 个部分匹配帖子改造query...")

        # Collect all missing aspects
        all_missing = []
        for note in partial_notes:
            all_missing.extend(note["evaluation"]["missing_aspects"])

        if all_missing:
            improvement_input = f"""
<当前Query>
{query}
</当前Query>
<缺失的方面>
{', '.join(set(all_missing[:5]))}
</缺失的方面>
请改造query使其包含这些缺失的内容。
"""
            result = await Runner.run(query_improver, improvement_input)
            improvement: QueryImprovement = result.final_output

            # Record the query-improvement agent's input and output
            improvement_agent_call = {
                "agent": "Query改造专家",
                "action": "基于缺失方面改造Query",
                "input": {
                    "query": query,
                    "missing_aspects": list(set(all_missing[:5]))
                },
                "output": {
                    "improved_query": improvement.improved_query,
                    "added_aspects": improvement.added_aspects,
                    "reasoning": improvement.reasoning
                }
            }
            agent_calls.append(improvement_agent_call)

            # Evaluate the improved query
            improved_eval = await evaluate_query_relevance(improvement.improved_query, original_need, query_state.relevance_score, context)

            # Create the improved query state
            new_state = QueryState(
                query=improvement.improved_query,
                level=query_state.level + 1,
                relevance_score=improved_eval.relevance_score,
                parent_query=query,
                strategy="improve_from_partial"
            )

            # Add to the evolution graph whether or not it improved
            add_query_to_graph(
                context,
                new_state,
                iteration,
                evaluation_reason=improved_eval.reason,
                is_selected=improved_eval.is_improved
            )

            if improved_eval.is_improved:
                print(f" ✓ 改进: {improvement.improved_query} (添加: {', '.join(improvement.added_aspects[:2])})")
                new_queries.append(new_state)
            else:
                print(f" ✗ 改进: {improvement.improved_query} (分数: {improved_eval.relevance_score:.2f}, 未提升)")

    # 6. Rewrite strategies for the result branch (abstract upward and synonym).
    # If the search results are poor and too few new queries exist, rewrite the current query.
    if len(satisfied_notes) < 3 and len(new_queries) < 2:
        print(f"\n 搜索结果不理想,尝试改写query...")

        # 6.1 Abstract upward
        if len(new_queries) < 3:
            rewrite_input_abstract = f"""
<当前Query>
{query}
</当前Query>
<改写要求>
类型: abstract (向上抽象)
</改写要求>
请改写这个query。
"""
            result = await Runner.run(query_rewriter, rewrite_input_abstract)
            rewrite: QueryRewrite = result.final_output

            # Record the result-branch abstract-rewrite agent's input and output
            rewrite_agent_call = {
                "agent": "Query改写专家",
                "action": "向上抽象改写(Result分支)",
                "input": {
                    "query": query,
                    "rewrite_type": "abstract"
                },
                "output": {
                    "rewritten_query": rewrite.rewritten_query,
                    "rewrite_type": rewrite.rewrite_type,
                    "reasoning": rewrite.reasoning
                }
            }
            agent_calls.append(rewrite_agent_call)

            # Evaluate the rewritten query
            rewrite_eval = await evaluate_query_relevance(rewrite.rewritten_query, original_need, query_state.relevance_score, context)

            # Create the rewritten query state
            new_state = QueryState(
                query=rewrite.rewritten_query,
                level=query_state.level + 1,
                relevance_score=rewrite_eval.relevance_score,
                parent_query=query,
                strategy="result_rewrite_abstract"
            )

            # Add to the evolution graph whether or not it improved
            add_query_to_graph(
                context,
                new_state,
                iteration,
                evaluation_reason=rewrite_eval.reason,
                is_selected=rewrite_eval.is_improved
            )

            if rewrite_eval.is_improved:
                print(f" ✓ 改写(抽象): {rewrite.rewritten_query} (分数: {rewrite_eval.relevance_score:.2f})")
                new_queries.append(new_state)
            else:
                print(f" ✗ 改写(抽象): {rewrite.rewritten_query} (分数: {rewrite_eval.relevance_score:.2f}, 未提升)")

        # 6.2 Synonym rewrite
        if len(new_queries) < 4:
            rewrite_input_synonym = f"""
<当前Query>
{query}
</当前Query>
<改写要求>
类型: synonym (同义改写)
使用同义词或相关表达来改写query,保持语义相同但表达方式不同。
</改写要求>
请改写这个query。
"""
            result = await Runner.run(query_rewriter, rewrite_input_synonym)
            rewrite_syn: QueryRewrite = result.final_output

            # Record the result-branch synonym-rewrite agent's input and output
            rewrite_syn_agent_call = {
                "agent": "Query改写专家",
                "action": "同义改写(Result分支)",
                "input": {
                    "query": query,
                    "rewrite_type": "synonym"
                },
                "output": {
                    "rewritten_query": rewrite_syn.rewritten_query,
                    "rewrite_type": rewrite_syn.rewrite_type,
                    "reasoning": rewrite_syn.reasoning
                }
            }
            agent_calls.append(rewrite_syn_agent_call)

            # Evaluate the rewritten query
            rewrite_syn_eval = await evaluate_query_relevance(rewrite_syn.rewritten_query, original_need, query_state.relevance_score, context)

            # Create the rewritten query state
            new_state = QueryState(
                query=rewrite_syn.rewritten_query,
                level=query_state.level + 1,
                relevance_score=rewrite_syn_eval.relevance_score,
                parent_query=query,
                strategy="result_rewrite_synonym"
            )

            # Add to the evolution graph whether or not it improved
            add_query_to_graph(
                context,
                new_state,
                iteration,
                evaluation_reason=rewrite_syn_eval.reason,
                is_selected=rewrite_syn_eval.is_improved
            )

            if rewrite_syn_eval.is_improved:
                print(f" ✓ 改写(同义): {rewrite_syn.rewritten_query} (分数: {rewrite_syn_eval.relevance_score:.2f})")
                new_queries.append(new_state)
            else:
                print(f" ✗ 改写(同义): {rewrite_syn.rewritten_query} (分数: {rewrite_syn_eval.relevance_score:.2f}, 未提升)")

    # Record the full result of this result branch (hierarchical)
    add_step(context, f"Result分支 - {query}", "result_branch", {
        "query": query,
        "query_level": query_state.level,
        "query_relevance": query_state.relevance_score,
        "relevance_threshold": relevance_threshold,
        "passed_threshold": query_state.relevance_score >= relevance_threshold,
        "notes_count": len(notes),  # notes is always bound here; the early returns above cover the other cases
        "satisfied_count": len(satisfied_notes),
        "partial_count": len(partial_notes),
        "satisfied_notes": [
            {
                "note_id": note["note_id"],
                "title": note["title"],
                "score": note["evaluation"]["relevance_score"],
                "match_level": note["evaluation"]["match_level"]
            }
            for note in satisfied_notes[:10]  # keep only the first 10
        ],
        "agent_calls": agent_calls,  # detailed record of every agent call
        "new_queries_generated": len(new_queries),
        "new_queries": [{"query": nq.query, "score": nq.relevance_score, "strategy": nq.strategy} for nq in new_queries]
    })
    return satisfied_notes, new_queries

async def iterative_search_loop(
    context: RunContext,
    max_iterations: int = 20,
    max_concurrent_queries: int = 5,
    relevance_threshold: float = 0.6
) -> list[dict]:
    """
    Main loop: iterative search.

    Args:
        context: run context
        max_iterations: maximum number of iterations
        max_concurrent_queries: maximum number of queries processed per round
        relevance_threshold: relevance threshold required before searching

    Returns:
        List of notes that satisfy the need.
    """
    print(f"\n{'='*60}")
    print(f"开始迭代搜索循环")
    print(f"{'='*60}")

    # 0. Add the original question as the root node
    root_query_state = QueryState(
        query=context.q,
        level=0,
        relevance_score=1.0,  # the original question is fully relevant by definition
        strategy="root"
    )
    add_query_to_graph(context, root_query_state, 0, evaluation_reason="原始问题,作为搜索的根节点", is_selected=True)
    print(f"[根节点] 原始问题: {context.q}")

    # 1. Initialize the word library
    word_lib = await initialize_word_library(context.q, context)

    # 2. Initialize the query queue: pick the most relevant words
    all_words = list(word_lib.words)
    query_queue = []
    print(f"\n评估所有初始分词的相关度...")
    word_scores = []
    for word in all_words:
        # Score each word against the original need
        eval_result = await evaluate_query_relevance(word, context.q, None, context)
        word_scores.append({
            'word': word,
            'score': eval_result.relevance_score,
            'eval': eval_result
        })
        print(f" {word}: {eval_result.relevance_score:.2f}")

    # Sort by relevance and keep the top 3
    word_scores.sort(key=lambda x: x['score'], reverse=True)
    selected_words = word_scores[:3]

    # Add every segmented word to the evolution graph (including unselected ones)
    for item in word_scores:
        is_selected = item in selected_words
        query_state = QueryState(
            query=item['word'],
            level=1,
            relevance_score=item['score'],
            strategy="initial",
            parent_query=context.q  # the parent node is the original question
        )
        # Adding to the graph also creates the edge from parent_query to this query
        add_query_to_graph(context, query_state, 0, evaluation_reason=item['eval'].reason, is_selected=is_selected)
        # Only selected words enter the queue
        if is_selected:
            query_queue.append(query_state)

    print(f"\n初始query队列(按相关度选择): {[(q.query, f'{q.relevance_score:.2f}') for q in query_queue]}")
    print(f" (共评估了 {len(word_scores)} 个分词,选择了前 {len(query_queue)} 个)")

    # 3. API instances
    xiaohongshu_api = XiaohongshuSearchRecommendations()
    xiaohongshu_search = XiaohongshuSearch()

    # 4. Main loop
    all_satisfied_notes = []
    iteration = 0
    while query_queue and iteration < max_iterations:
        iteration += 1
        print(f"\n{'='*60}")
        print(f"迭代 {iteration}: 队列中有 {len(query_queue)} 个query")
        print(f"{'='*60}")

        # Cap the number of queries processed this round
        current_batch = query_queue[:max_concurrent_queries]
        query_queue = query_queue[max_concurrent_queries:]

        # Record the queries processed this round
        add_step(context, f"迭代 {iteration}", "iteration", {
            "iteration": iteration,
            "queue_size": len(query_queue) + len(current_batch),
            "processing_queries": [q.query for q in current_batch]
        })

        new_queries_from_sug = []
        new_queries_from_result = []
        round_satisfied_notes = []  # notes found this round (also avoids a NameError when every query in the batch is skipped)

        # Process each query
        for query_state in current_batch:
            print(f"\n处理Query [{query_state.level}]: {query_state.query} (分数: {query_state.relevance_score:.2f})")

            # Check termination conditions
            if query_state.is_terminated or query_state.no_suggestion_rounds >= 2:
                print(f" ✗ 已终止或连续2轮无suggestion,跳过该query")
                query_state.is_terminated = True
                continue

            # Run the two branches concurrently
            sug_task = process_suggestions(
                query_state.query, query_state, context.q, word_lib, context, xiaohongshu_api, iteration
            )
            result_task = process_search_results(
                query_state.query, query_state, context.q, word_lib, context,
                xiaohongshu_search, relevance_threshold, iteration
            )

            # Wait for both branches to finish
            sug_queries, (satisfied_notes, result_queries) = await asyncio.gather(
                sug_task,
                result_task
            )

            # If the suggestion branch returned nothing, no suggestions were fetched;
            # process_suggestions has already bumped query_state.no_suggestion_rounds.
            if not sug_queries and not result_queries:
                # Neither branch produced new queries: terminate this query
                query_state.is_terminated = True
                print(f" ⚠ 两个分支均未产生新query,标记该query为终止")

            new_queries_from_sug.extend(sug_queries)
            new_queries_from_result.extend(result_queries)
            round_satisfied_notes.extend(satisfied_notes)

        all_satisfied_notes.extend(round_satisfied_notes)

        # Update the queue
        all_new_queries = new_queries_from_sug + new_queries_from_result
        # Note: the branch functions already added each new query to the evolution
        # graph with its evaluation reason; re-adding them here would overwrite
        # those reasons and duplicate edges, so the queries only enter the queue.
        query_queue.extend(all_new_queries)

        # Deduplicate by query text and drop terminated queries
        seen = set()
        unique_queue = []
        for q in query_queue:
            if q.query not in seen and not q.is_terminated:
                seen.add(q.query)
                unique_queue.append(q)
        query_queue = unique_queue

        # Sort by relevance
        query_queue.sort(key=lambda x: x.relevance_score, reverse=True)

        print(f"\n本轮结果:")
        print(f" 新增满足帖子: {len(round_satisfied_notes)}")
        print(f" 累计满足帖子: {len(all_satisfied_notes)}")
        print(f" 新增queries: {len(all_new_queries)}")
        print(f" 队列剩余: {len(query_queue)}")

        # Persist the word library to the context
        context.word_library = word_lib.model_dump()

        # Stop early once enough satisfying notes have been found
        if len(all_satisfied_notes) >= 20:
            print(f"\n已找到足够的满足帖子 ({len(all_satisfied_notes)}个),提前结束")
            break

    print(f"\n{'='*60}")
    print(f"迭代搜索完成")
    print(f" 总迭代次数: {iteration}")
    print(f" 最终满足帖子数: {len(all_satisfied_notes)}")
    print(f" 最终分词库大小: {len(word_lib.words)}")
    print(f"{'='*60}")

    # Record the final result
    add_step(context, "迭代搜索完成", "loop_complete", {
        "total_iterations": iteration,
        "total_satisfied_notes": len(all_satisfied_notes),
        "final_word_library_size": len(word_lib.words),
        "final_word_library": list(word_lib.words)
    })
    return all_satisfied_notes

# ============================================================================
# Main entry point
# ============================================================================
async def main(input_dir: str, max_iterations: int = 20, visualize: bool = False):
    """Main entry point."""
    current_time, log_url = set_trace()

    # Read inputs
    input_context_file = os.path.join(input_dir, 'context.md')
    input_q_file = os.path.join(input_dir, 'q.md')
    q_context = read_file_as_string(input_context_file)
    q = read_file_as_string(input_q_file)
    q_with_context = f"""
<需求上下文>
{q_context}
</需求上下文>
<当前问题>
{q}
</当前问题>
""".strip()

    # Version info
    version = os.path.basename(__file__)
    version_name = os.path.splitext(version)[0]

    # Log directory
    log_dir = os.path.join(input_dir, "output", version_name, current_time)

    # Create the run context
    run_context = RunContext(
        version=version,
        input_files={
            "input_dir": input_dir,
            "context_file": input_context_file,
            "q_file": input_q_file,
        },
        q_with_context=q_with_context,
        q_context=q_context,
        q=q,
        log_dir=log_dir,
        log_url=log_url,
    )

    # Run the iterative search
    satisfied_notes = await iterative_search_loop(
        run_context,
        max_iterations=max_iterations,
        max_concurrent_queries=3,
        relevance_threshold=0.6
    )

    # Store the results
    run_context.satisfied_notes = satisfied_notes

    # Format the output
    output = f"原始问题:{run_context.q}\n"
    output += f"找到满足需求的帖子:{len(satisfied_notes)} 个\n"
    output += f"分词库大小:{len(run_context.word_library.get('words', []))} 个词\n"
    output += "\n" + "="*60 + "\n"
    if satisfied_notes:
        output += "【满足需求的帖子】\n\n"
        for idx, note in enumerate(satisfied_notes[:10], 1):
            output += f"{idx}. {note['title']}\n"
            output += f" 相关度: {note['evaluation']['relevance_score']:.2f}\n"
            output += f" URL: {note['note_url']}\n\n"
    else:
        output += "未找到满足需求的帖子\n"
    run_context.final_output = output

    print(f"\n{'='*60}")
    print("最终结果")
    print(f"{'='*60}")
    print(output)

    # Save logs
    os.makedirs(run_context.log_dir, exist_ok=True)
    context_file_path = os.path.join(run_context.log_dir, "run_context.json")
    context_dict = run_context.model_dump()
    with open(context_file_path, "w", encoding="utf-8") as f:
        json.dump(context_dict, f, ensure_ascii=False, indent=2)
    print(f"\nRunContext saved to: {context_file_path}")

    steps_file_path = os.path.join(run_context.log_dir, "steps.json")
    with open(steps_file_path, "w", encoding="utf-8") as f:
        json.dump(run_context.steps, f, ensure_ascii=False, indent=2)
    print(f"Steps log saved to: {steps_file_path}")

    # Save the query evolution graph
    query_graph_file_path = os.path.join(run_context.log_dir, "query_graph.json")
    with open(query_graph_file_path, "w", encoding="utf-8") as f:
        json.dump(run_context.query_graph, f, ensure_ascii=False, indent=2)
    print(f"Query graph saved to: {query_graph_file_path}")

    # Visualization
    if visualize:
        import subprocess
        output_html = os.path.join(run_context.log_dir, "visualization.html")
        print(f"\n🎨 生成可视化HTML...")
        result = subprocess.run([
            "python", "sug_v6_1_2_3.visualize.py",
            steps_file_path,
            "-o", output_html
        ])
        if result.returncode == 0:
            print(f"✅ 可视化已生成: {output_html}")
        else:
            print(f"❌ 可视化生成失败")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="搜索query优化工具 - v6.1.2.5 迭代循环版")
    parser.add_argument(
        "--input-dir",
        type=str,
        default="input/简单扣图",
        help="输入目录路径,默认: input/简单扣图"
    )
    parser.add_argument(
        "--max-iterations",
        type=int,
        default=20,
        help="最大迭代次数,默认: 20"
    )
    parser.add_argument(
        "--visualize",
        action="store_true",
        default=False,
        help="运行完成后自动生成可视化HTML"
    )
    args = parser.parse_args()
    asyncio.run(main(args.input_dir, max_iterations=args.max_iterations, visualize=args.visualize))
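
# Example invocation (the script filename is assumed; adjust to the actual file):
#
#     python sug_v6_1_2_5.py --input-dir input/简单扣图 --max-iterations 10 --visualize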