runner.py 88 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180
  1. """
  2. Agent Runner - Agent 执行引擎
  3. 核心职责:
  4. 1. 执行 Agent 任务(循环调用 LLM + 工具)
  5. 2. 记录执行轨迹(Trace + Messages + GoalTree)
  6. 3. 加载和注入技能(Skill)
  7. 4. 管理执行计划(GoalTree)
  8. 5. 支持续跑(continue)和回溯重跑(rewind)
  9. 参数分层:
  10. - Infrastructure: AgentRunner 构造时设置(trace_store, llm_call 等)
  11. - RunConfig: 每次 run 时指定(model, trace_id, after_sequence 等)
  12. - Messages: OpenAI SDK 格式的任务消息
  13. """
  14. import asyncio
  15. import json
  16. import logging
  17. import os
  18. import uuid
  19. from dataclasses import dataclass, field
  20. from datetime import datetime
  21. from typing import AsyncIterator, Optional, Dict, Any, List, Callable, Literal, Tuple, Union
  22. from agent.trace.models import Trace, Message
  23. from agent.trace.protocols import TraceStore
  24. from agent.trace.goal_models import GoalTree
  25. from agent.trace.compaction import (
  26. CompressionConfig,
  27. compress_completed_goals,
  28. estimate_tokens,
  29. needs_level2_compression,
  30. build_compression_prompt,
  31. )
  32. from agent.skill.models import Skill
  33. from agent.skill.skill_loader import load_skills_from_dir
  34. from agent.tools import ToolRegistry, get_tool_registry
  35. from agent.tools.builtin.knowledge import KnowledgeConfig
  36. from agent.core.prompts import (
  37. DEFAULT_SYSTEM_PREFIX,
  38. TRUNCATION_HINT,
  39. TOOL_INTERRUPTED_MESSAGE,
  40. AGENT_INTERRUPTED_SUMMARY,
  41. AGENT_CONTINUE_HINT_TEMPLATE,
  42. TASK_NAME_GENERATION_SYSTEM_PROMPT,
  43. TASK_NAME_FALLBACK,
  44. SUMMARY_HEADER_TEMPLATE,
  45. build_summary_header,
  46. build_tool_interrupted_message,
  47. build_agent_continue_hint,
  48. )
  49. logger = logging.getLogger(__name__)
@dataclass
class ContextUsage:
    """Snapshot of context-window usage for a single trace."""
    trace_id: str
    message_count: int        # number of messages currently in the history
    token_count: int          # estimated token count of the history
    max_tokens: int           # context limit for the model in use
    usage_percent: float      # token_count / max_tokens * 100
    image_count: int = 0      # number of image parts found in the history
@dataclass
class SideBranchContext:
    """Side-branch context (compression / reflection).

    Records where a side branch forked off the main message path so that
    messages produced inside the branch can be tagged and the agent can
    return to the fork point afterwards.
    """
    type: Literal["compression", "reflection"]
    branch_id: str
    start_head_seq: int        # head_seq at the side branch's starting point
    start_sequence: int        # sequence of the first message in the side branch
    start_history_length: int  # history length at the side branch's starting point
    start_iteration: int       # iteration count when the side branch started
    max_turns: int = 5         # maximum number of turns allowed in the branch

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for persistence and for passing to tools.

        NOTE(review): start_history_length is not included here — presumably
        it is only meaningful in-process (the restore path rebuilds it as 0);
        confirm before relying on the persisted form.
        """
        return {
            "type": self.type,
            "branch_id": self.branch_id,
            "start_head_seq": self.start_head_seq,
            "start_sequence": self.start_sequence,
            "start_iteration": self.start_iteration,
            "max_turns": self.max_turns,
            "is_side_branch": True,
            "started_at": datetime.now().isoformat(),
        }
# ===== Run configuration =====
@dataclass
class RunConfig:
    """
    Run parameters — control how the Agent executes.

    Split into model-level parameters (decided by the upstream agent or the
    user) and framework-level parameters (injected by the system).
    """
    # --- Model-level parameters ---
    model: str = "gpt-4o"
    temperature: float = 0.3
    max_iterations: int = 200
    tools: Optional[List[str]] = None  # None = all registered tools
    side_branch_max_turns: int = 5  # max turns inside a side branch (compression/reflection)
    goal_compression: Literal["none", "on_complete", "on_overflow"] = "on_overflow"  # goal compression mode
    # --- Forced side branches (manual API trigger or the auto-compression flow) ---
    # A queue of side branches; after each one finishes, pop(0) takes the next.
    force_side_branch: Optional[List[Literal["compression", "reflection"]]] = None
    # --- Framework-level parameters ---
    agent_type: str = "default"
    # Agent instance identifier (fallback knowledge owner); falls back to uid when empty
    agent_id: Optional[str] = None
    uid: Optional[str] = None
    system_prompt: Optional[str] = None  # None = build automatically from skills
    skills: Optional[List[str]] = None  # skill names injected into the system prompt; None = decided by preset
    enable_memory: bool = True
    auto_execute_tools: bool = True
    name: Optional[str] = None  # display name (auto-generated via utility_llm when empty)
    enable_prompt_caching: bool = True  # enable Anthropic Prompt Caching (Claude models only)
    # --- Trace control ---
    trace_id: Optional[str] = None  # None = create a new trace
    parent_trace_id: Optional[str] = None  # sub-agent only
    parent_goal_id: Optional[str] = None
    # --- Continuation control ---
    after_sequence: Optional[int] = None  # message sequence to continue after
    # --- Extra LLM parameters (forwarded to llm_call as **kwargs) ---
    extra_llm_params: Dict[str, Any] = field(default_factory=dict)
    # --- Custom metadata context ---
    context: Dict[str, Any] = field(default_factory=dict)
    # --- Research flow control ---
    enable_research_flow: bool = True  # enable the automatic research flow (knowledge → experience → research → plan)
    # --- Knowledge management configuration ---
    knowledge: KnowledgeConfig = field(default_factory=KnowledgeConfig)
# Built-in tool list (always loaded automatically)
BUILTIN_TOOLS = [
    # File-operation tools
    "read_file",
    "edit_file",
    "write_file",
    "glob_files",
    "grep_content",
    # System tools
    "bash_command",
    # Skill and goal management
    "skill",
    "list_skills",
    "goal",
    "agent",
    "evaluate",
    "get_current_context",
    # Search tools
    "search_posts",
    "get_search_suggestions",
    # Knowledge-management tools
    "knowledge_search",
    "knowledge_save",
    "knowledge_update",
    "knowledge_batch_update",
    "knowledge_list",
    "knowledge_slim",
    # Sandbox tools (currently disabled)
    # "sandbox_create_environment",
    # "sandbox_run_shell",
    # "sandbox_rebuild_with_ports",
    # "sandbox_destroy_environment",
    # Browser tools
    "browser_get_live_url",
    "browser_navigate_to_url",
    "browser_search_web",
    "browser_go_back",
    "browser_wait",
    "browser_click_element",
    "browser_input_text",
    "browser_send_keys",
    "browser_upload_file",
    "browser_scroll_page",
    "browser_find_text",
    "browser_screenshot",
    "browser_switch_tab",
    "browser_close_tab",
    "browser_get_dropdown_options",
    "browser_select_dropdown_option",
    "browser_extract_content",
    "browser_read_long_content",
    "browser_download_direct_url",
    "browser_get_page_html",
    "browser_get_visual_selector_map",
    "browser_evaluate",
    "browser_ensure_login_with_cookies",
    # Can be temporarily replaced by Feishu messaging
    #"browser_wait_for_user_action",
    "browser_done",
    "browser_export_cookies",
    "browser_load_cookies",
    # Feishu tools
    "feishu_send_message_to_contact",
    "feishu_get_chat_history",
    "feishu_get_contact_replies",
    "feishu_get_contact_list",
]
@dataclass
class CallResult:
    """Result of a single LLM call (no agent loop)."""
    reply: str                                # assistant text reply
    tool_calls: Optional[List[Dict]] = None   # raw tool calls returned by the LLM, if any
    trace_id: Optional[str] = None            # trace id when tracing was enabled
    step_id: Optional[str] = None             # id of the persisted assistant message
    tokens: Optional[Dict[str, int]] = None   # {"prompt": ..., "completion": ...}
    cost: float = 0.0                         # reported cost of the call
# ===== Execution engine =====
CONTEXT_INJECTION_INTERVAL = 10  # inject GoalTree + Collaborators every N iterations
  201. class AgentRunner:
  202. """
  203. Agent 执行引擎
  204. 支持三种运行模式(通过 RunConfig 区分):
  205. 1. 新建:trace_id=None
  206. 2. 续跑:trace_id=已有ID, after_sequence=None 或 == head
  207. 3. 回溯:trace_id=已有ID, after_sequence=N(N < head_sequence)
  208. """
    def __init__(
        self,
        trace_store: Optional[TraceStore] = None,
        tool_registry: Optional[ToolRegistry] = None,
        llm_call: Optional[Callable] = None,
        utility_llm_call: Optional[Callable] = None,
        skills_dir: Optional[str] = None,
        goal_tree: Optional[GoalTree] = None,
        debug: bool = False,
    ):
        """
        Initialize the AgentRunner.

        Args:
            trace_store: Trace storage backend.
            tool_registry: Tool registry (defaults to the global registry).
            llm_call: Main LLM call function.
            utility_llm_call: Lightweight LLM (used e.g. for task-title generation), optional.
            skills_dir: Path to the skills directory.
            goal_tree: Initial GoalTree (optional).
            debug: Retained parameter (deprecated).
        """
        self.trace_store = trace_store
        self.tools = tool_registry or get_tool_registry()
        self.llm_call = llm_call
        self.utility_llm_call = utility_llm_call
        self.skills_dir = skills_dir
        self.goal_tree = goal_tree
        self.debug = debug
        self._cancel_events: Dict[str, asyncio.Event] = {}  # trace_id → cancel event
        # Saved-knowledge tracking (independent per trace)
        self._saved_knowledge_ids: Dict[str, List[str]] = {}  # trace_id → [knowledge_ids]
        # Context-usage tracking
        self._context_warned: Dict[str, set] = {}  # trace_id → thresholds already warned ({30, 50, 80})
        self._context_usage: Dict[str, ContextUsage] = {}  # trace_id → latest usage snapshot
  243. # ===== 核心公开方法 =====
  244. def get_context_usage(self, trace_id: str) -> Optional[ContextUsage]:
  245. """获取指定 trace 的 context 使用情况"""
  246. return self._context_usage.get(trace_id)
  247. async def run(
  248. self,
  249. messages: List[Dict],
  250. config: Optional[RunConfig] = None,
  251. ) -> AsyncIterator[Union[Trace, Message]]:
  252. """
  253. Agent 模式执行(核心方法)
  254. Args:
  255. messages: OpenAI SDK 格式的输入消息
  256. 新建: 初始任务消息 [{"role": "user", "content": "..."}]
  257. 续跑: 追加的新消息
  258. 回溯: 在插入点之后追加的消息
  259. config: 运行配置
  260. Yields:
  261. Union[Trace, Message]: Trace 对象(状态变化)或 Message 对象(执行过程)
  262. """
  263. if not self.llm_call:
  264. raise ValueError("llm_call function not provided")
  265. config = config or RunConfig()
  266. trace = None
  267. try:
  268. # Phase 1: PREPARE TRACE
  269. trace, goal_tree, sequence = await self._prepare_trace(messages, config)
  270. # 注册取消事件
  271. self._cancel_events[trace.trace_id] = asyncio.Event()
  272. yield trace
  273. # 检查是否有未完成的侧分支(用于用户追加消息场景)
  274. side_branch_ctx_for_build: Optional[SideBranchContext] = None
  275. if trace.context.get("active_side_branch") and messages:
  276. side_branch_data = trace.context["active_side_branch"]
  277. # 创建侧分支上下文(用于标记用户追加的消息)
  278. side_branch_ctx_for_build = SideBranchContext(
  279. type=side_branch_data["type"],
  280. branch_id=side_branch_data["branch_id"],
  281. start_head_seq=side_branch_data["start_head_seq"],
  282. start_sequence=side_branch_data["start_sequence"],
  283. start_history_length=0,
  284. start_iteration=side_branch_data.get("start_iteration", 0),
  285. max_turns=side_branch_data.get("max_turns", config.side_branch_max_turns),
  286. )
  287. # Phase 2: BUILD HISTORY
  288. history, sequence, created_messages, head_seq = await self._build_history(
  289. trace.trace_id, messages, goal_tree, config, sequence, side_branch_ctx_for_build
  290. )
  291. # Update trace's head_sequence in memory
  292. trace.head_sequence = head_seq
  293. for msg in created_messages:
  294. yield msg
  295. # Phase 3: AGENT LOOP
  296. async for event in self._agent_loop(trace, history, goal_tree, config, sequence):
  297. yield event
  298. except Exception as e:
  299. logger.error(f"Agent run failed: {e}")
  300. tid = config.trace_id or (trace.trace_id if trace else None)
  301. if self.trace_store and tid:
  302. # 读取当前 last_sequence 作为 head_sequence,确保续跑时能加载完整历史
  303. current = await self.trace_store.get_trace(tid)
  304. head_seq = current.last_sequence if current else None
  305. await self.trace_store.update_trace(
  306. tid,
  307. status="failed",
  308. head_sequence=head_seq,
  309. error_message=str(e),
  310. completed_at=datetime.now()
  311. )
  312. trace_obj = await self.trace_store.get_trace(tid)
  313. if trace_obj:
  314. yield trace_obj
  315. raise
  316. finally:
  317. # 清理取消事件
  318. if trace:
  319. self._cancel_events.pop(trace.trace_id, None)
  320. async def run_result(
  321. self,
  322. messages: List[Dict],
  323. config: Optional[RunConfig] = None,
  324. on_event: Optional[Callable] = None,
  325. ) -> Dict[str, Any]:
  326. """
  327. 结果模式 — 消费 run(),返回结构化结果。
  328. 主要用于 agent/evaluate 工具内部。
  329. Args:
  330. on_event: 可选回调,每个 Trace/Message 事件触发一次,用于实时输出子 Agent 执行过程。
  331. """
  332. last_assistant_text = ""
  333. final_trace: Optional[Trace] = None
  334. async for item in self.run(messages=messages, config=config):
  335. if on_event:
  336. on_event(item)
  337. if isinstance(item, Message) and item.role == "assistant":
  338. content = item.content
  339. text = ""
  340. if isinstance(content, dict):
  341. text = content.get("text", "") or ""
  342. elif isinstance(content, str):
  343. text = content
  344. if text and text.strip():
  345. last_assistant_text = text
  346. elif isinstance(item, Trace):
  347. final_trace = item
  348. config = config or RunConfig()
  349. if not final_trace and config.trace_id and self.trace_store:
  350. final_trace = await self.trace_store.get_trace(config.trace_id)
  351. status = final_trace.status if final_trace else "unknown"
  352. error = final_trace.error_message if final_trace else None
  353. summary = last_assistant_text
  354. if not summary:
  355. status = "failed"
  356. error = error or "Agent 没有产生 assistant 文本结果"
  357. # 获取保存的知识 ID
  358. trace_id = final_trace.trace_id if final_trace else config.trace_id
  359. saved_knowledge_ids = self._saved_knowledge_ids.get(trace_id, [])
  360. return {
  361. "status": status,
  362. "summary": summary,
  363. "trace_id": trace_id,
  364. "error": error,
  365. "saved_knowledge_ids": saved_knowledge_ids, # 新增:返回保存的知识 ID
  366. "stats": {
  367. "total_messages": final_trace.total_messages if final_trace else 0,
  368. "total_tokens": final_trace.total_tokens if final_trace else 0,
  369. "total_cost": final_trace.total_cost if final_trace else 0.0,
  370. },
  371. }
  372. async def stop(self, trace_id: str) -> bool:
  373. """
  374. 停止运行中的 Trace
  375. 设置取消信号,agent loop 在下一个 LLM 调用前检查并退出。
  376. Trace 状态置为 "stopped"。
  377. Returns:
  378. True 如果成功发送停止信号,False 如果该 trace 不在运行中
  379. """
  380. cancel_event = self._cancel_events.get(trace_id)
  381. if cancel_event is None:
  382. return False
  383. cancel_event.set()
  384. return True
    # ===== Single call (retained) =====
    async def call(
        self,
        messages: List[Dict],
        model: str = "gpt-4o",
        tools: Optional[List[str]] = None,
        uid: Optional[str] = None,
        trace: bool = True,
        **kwargs
    ) -> CallResult:
        """
        Single LLM call (no agent loop).

        Args:
            messages: Messages in OpenAI SDK format.
            model: Model name passed through to llm_call.
            tools: Tool names to expose; None = all registered tools.
            uid: Optional user id recorded on the call-mode trace.
            trace: Whether to persist the call as a trace (requires trace_store).
            **kwargs: Extra parameters forwarded to llm_call (and recorded as llm_params).

        Returns:
            CallResult with the reply, tool calls, trace/step ids, and token/cost stats.

        Raises:
            ValueError: when no llm_call function was provided.
        """
        if not self.llm_call:
            raise ValueError("llm_call function not provided")
        trace_id = None
        message_id = None
        tool_schemas = self._get_tool_schemas(tools)
        if trace and self.trace_store:
            trace_obj = Trace.create(mode="call", uid=uid, model=model, tools=tool_schemas, llm_params=kwargs)
            trace_id = await self.trace_store.create_trace(trace_obj)
        result = await self.llm_call(messages=messages, model=model, tools=tool_schemas, **kwargs)
        if trace and self.trace_store and trace_id:
            # Persist the assistant reply as a single message and close the trace
            msg = Message.create(
                trace_id=trace_id, role="assistant", sequence=1, goal_id=None,
                content={"text": result.get("content", ""), "tool_calls": result.get("tool_calls")},
                prompt_tokens=result.get("prompt_tokens", 0),
                completion_tokens=result.get("completion_tokens", 0),
                finish_reason=result.get("finish_reason"),
                cost=result.get("cost", 0),
            )
            message_id = await self.trace_store.add_message(msg)
            await self.trace_store.update_trace(trace_id, status="completed", completed_at=datetime.now())
        return CallResult(
            reply=result.get("content", ""),
            tool_calls=result.get("tool_calls"),
            trace_id=trace_id,
            step_id=message_id,
            tokens={"prompt": result.get("prompt_tokens", 0), "completion": result.get("completion_tokens", 0)},
            cost=result.get("cost", 0)
        )
  426. # ===== Phase 1: PREPARE TRACE =====
  427. async def _prepare_trace(
  428. self,
  429. messages: List[Dict],
  430. config: RunConfig,
  431. ) -> Tuple[Trace, Optional[GoalTree], int]:
  432. """
  433. 准备 Trace:创建新的或加载已有的
  434. Returns:
  435. (trace, goal_tree, next_sequence)
  436. """
  437. if config.trace_id:
  438. return await self._prepare_existing_trace(config)
  439. else:
  440. return await self._prepare_new_trace(messages, config)
    async def _prepare_new_trace(
        self,
        messages: List[Dict],
        config: RunConfig,
    ) -> Tuple[Trace, Optional[GoalTree], int]:
        """Create a new trace.

        Returns:
            (trace, goal_tree, 1) — message numbering for a new trace starts at 1.
        """
        trace_id = str(uuid.uuid4())
        # Generate the task name (auto-generated when config.name is empty)
        task_name = config.name or await self._generate_task_name(messages)
        # Prepare the tool schemas
        tool_schemas = self._get_tool_schemas(config.tools)
        trace_obj = Trace(
            trace_id=trace_id,
            mode="agent",
            task=task_name,
            agent_type=config.agent_type,
            parent_trace_id=config.parent_trace_id,
            parent_goal_id=config.parent_goal_id,
            uid=config.uid,
            model=config.model,
            tools=tool_schemas,
            llm_params={"temperature": config.temperature, **config.extra_llm_params},
            context=config.context,
            status="running",
        )
        # Fall back to a fresh GoalTree when none was injected at construction
        goal_tree = self.goal_tree or GoalTree(mission=task_name)
        if self.trace_store:
            await self.trace_store.create_trace(trace_obj)
            await self.trace_store.update_goal_tree(trace_id, goal_tree)
        return trace_obj, goal_tree, 1
    async def _prepare_existing_trace(
        self,
        config: RunConfig,
    ) -> Tuple[Trace, Optional[GoalTree], int]:
        """Load an existing trace (continue or rewind).

        Returns:
            (trace, goal_tree, next_sequence)

        Raises:
            ValueError: when trace_store is missing or the trace id is unknown.
        """
        if not self.trace_store:
            raise ValueError("trace_store required for continue/rewind")
        trace_obj = await self.trace_store.get_trace(config.trace_id)
        if not trace_obj:
            raise ValueError(f"Trace not found: {config.trace_id}")
        goal_tree = await self.trace_store.get_goal_tree(config.trace_id)
        if goal_tree is None:
            # Defensive fallback: the trace exists but goal.json is missing —
            # create an empty tree
            goal_tree = GoalTree(mission=trace_obj.task or "Agent task")
            await self.trace_store.update_goal_tree(config.trace_id, goal_tree)
        # Behavior is decided automatically: after_sequence None or == head → continue;
        # < head → rewind
        after_seq = config.after_sequence
        # If after_seq > head_sequence, the generator was force-closed before the
        # store's head_sequence could be updated (it is still the initial value
        # written in Phase 2).  Correct head_sequence from last_sequence so a
        # continued run can see the full history.
        if after_seq is not None and after_seq > trace_obj.head_sequence:
            trace_obj.head_sequence = trace_obj.last_sequence
            await self.trace_store.update_trace(
                config.trace_id, head_sequence=trace_obj.head_sequence
            )
        if after_seq is not None and after_seq < trace_obj.head_sequence:
            # Rewind mode
            sequence = await self._rewind(config.trace_id, after_seq, goal_tree)
        else:
            # Continue mode: start from last_sequence + 1
            sequence = trace_obj.last_sequence + 1
        # Set the status back to running
        await self.trace_store.update_trace(
            config.trace_id,
            status="running",
            completed_at=None,
        )
        trace_obj.status = "running"
        # Broadcast the status change to the frontend (best-effort; failures ignored)
        try:
            from agent.trace.websocket import broadcast_trace_status_changed
            await broadcast_trace_status_changed(config.trace_id, "running")
        except Exception:
            pass
        return trace_obj, goal_tree, sequence
    # ===== Phase 2: BUILD HISTORY =====
    async def _build_history(
        self,
        trace_id: str,
        new_messages: List[Dict],
        goal_tree: Optional[GoalTree],
        config: RunConfig,
        sequence: int,
        side_branch_ctx: Optional[SideBranchContext] = None,
    ) -> Tuple[List[Dict], int, List[Message], int]:
        """
        Build the full LLM message history.

        1. Load main-path messages by walking the parent chain from
           head_sequence (continue/rewind scenarios).
        2. Build the system prompt (skills are injected on a new run).
        3. New run: inject current experience at the end of the first user
           message.
        4. Append the input messages (parent_sequence links them to the
           current head).
        5. When inside a side branch, appended messages are automatically
           tagged as side-branch messages.

        Returns:
            (history, next_sequence, created_messages, head_sequence)
            created_messages: messages newly created and persisted in this
                call, for run() to yield to the caller
            head_sequence: sequence of the current main-path head node
        """
        history: List[Dict] = []
        created_messages: List[Message] = []
        head_seq: Optional[int] = None  # sequence of the current main-path head node
        # 1. Load existing messages (by walking the main path)
        if config.trace_id and self.trace_store:
            trace_obj = await self.trace_store.get_trace(trace_id)
            if trace_obj and trace_obj.head_sequence > 0:
                main_path = await self.trace_store.get_main_path_messages(
                    trace_id, trace_obj.head_sequence
                )
                # Heal orphaned tool_calls (a tool_call left without its
                # tool_result by an interruption)
                main_path, sequence = await self._heal_orphaned_tool_calls(
                    main_path, trace_id, goal_tree, sequence,
                )
                history = [msg.to_llm_dict() for msg in main_path]
                if main_path:
                    head_seq = main_path[-1].sequence
        # 2. Build / inject skills into the system prompt
        has_system = any(m.get("role") == "system" for m in history)
        has_system_in_new = any(m.get("role") == "system" for m in new_messages)
        if not has_system:
            if has_system_in_new:
                # The input already contains a system message: inject skills into
                # it (before it is persisted in step 3)
                augmented = []
                for msg in new_messages:
                    if msg.get("role") == "system":
                        base = msg.get("content") or ""
                        enriched = await self._build_system_prompt(config, base_prompt=base)
                        augmented.append({**msg, "content": enriched or base})
                    else:
                        augmented.append(msg)
                new_messages = augmented
            else:
                # No system message anywhere: build one automatically and
                # prepend it to the history
                system_prompt = await self._build_system_prompt(config)
                if system_prompt:
                    history = [{"role": "system", "content": system_prompt}] + history
                    if self.trace_store:
                        system_msg = Message.create(
                            trace_id=trace_id, role="system", sequence=sequence,
                            goal_id=None, content=system_prompt,
                            parent_sequence=None,  # the system message is the root
                        )
                        await self.trace_store.add_message(system_msg)
                        created_messages.append(system_msg)
                    head_seq = sequence
                    sequence += 1
        # 3. Append the new messages (parent_sequence links them to the current head)
        for msg_dict in new_messages:
            history.append(msg_dict)
            if self.trace_store:
                # When inside a side branch, tag the message as a side-branch message
                if side_branch_ctx:
                    stored_msg = Message.create(
                        trace_id=trace_id,
                        role=msg_dict["role"],
                        sequence=sequence,
                        goal_id=goal_tree.current_id if goal_tree else None,
                        parent_sequence=head_seq,
                        branch_type=side_branch_ctx.type,
                        branch_id=side_branch_ctx.branch_id,
                        content=msg_dict.get("content"),
                    )
                    logger.info(f"用户在侧分支 {side_branch_ctx.type} 中追加消息")
                else:
                    stored_msg = Message.from_llm_dict(
                        msg_dict, trace_id=trace_id, sequence=sequence,
                        goal_id=None, parent_sequence=head_seq,
                    )
                await self.trace_store.add_message(stored_msg)
                created_messages.append(stored_msg)
            head_seq = sequence
            sequence += 1
        # 4. Update the trace's head_sequence (comment was mis-numbered "5." before)
        if self.trace_store and head_seq is not None:
            await self.trace_store.update_trace(trace_id, head_sequence=head_seq)
        return history, sequence, created_messages, head_seq or 0
  615. # ===== Phase 3: AGENT LOOP =====
  616. async def _manage_context_usage(
  617. self,
  618. trace_id: str,
  619. history: List[Dict],
  620. goal_tree: Optional[GoalTree],
  621. config: RunConfig,
  622. sequence: int,
  623. head_seq: int,
  624. ) -> Tuple[List[Dict], int, int, bool]:
  625. """
  626. 管理 context 用量:检查、预警、压缩
  627. Returns:
  628. (updated_history, new_head_seq, next_sequence, needs_enter_compression_branch)
  629. """
  630. compression_config = CompressionConfig()
  631. token_count = estimate_tokens(history)
  632. max_tokens = compression_config.get_max_tokens(config.model)
  633. # 计算使用率
  634. progress_pct = (token_count / max_tokens * 100) if max_tokens > 0 else 0
  635. msg_count = len(history)
  636. img_count = sum(
  637. 1 for msg in history
  638. if isinstance(msg.get("content"), list)
  639. for part in msg["content"]
  640. if isinstance(part, dict) and part.get("type") in ("image", "image_url")
  641. )
  642. # 更新 context usage 快照
  643. self._context_usage[trace_id] = ContextUsage(
  644. trace_id=trace_id,
  645. message_count=msg_count,
  646. token_count=token_count,
  647. max_tokens=max_tokens,
  648. usage_percent=progress_pct,
  649. image_count=img_count,
  650. )
  651. # 阈值警告(30%, 50%, 80%)
  652. if trace_id not in self._context_warned:
  653. self._context_warned[trace_id] = set()
  654. for threshold in [30, 50, 80]:
  655. if progress_pct >= threshold and threshold not in self._context_warned[trace_id]:
  656. self._context_warned[trace_id].add(threshold)
  657. logger.warning(
  658. f"Context 使用率达到 {threshold}%: {token_count:,} / {max_tokens:,} tokens ({msg_count} 条消息)"
  659. )
  660. # 检查是否需要压缩(token 或消息数量超限)
  661. needs_compression_by_tokens = token_count > max_tokens
  662. needs_compression_by_count = (
  663. compression_config.max_messages > 0 and
  664. msg_count > compression_config.max_messages
  665. )
  666. needs_compression = needs_compression_by_tokens or needs_compression_by_count
  667. if not needs_compression:
  668. return history, head_seq, sequence, False
  669. # 知识提取:在任何压缩发生前,用完整 history 做反思(进入反思侧分支)
  670. if config.knowledge.enable_extraction and not config.force_side_branch:
  671. # 设置侧分支队列:先反思,再压缩
  672. config.force_side_branch = ["reflection", "compression"]
  673. return history, head_seq, sequence, True
  674. # 以下为未启用反思、需要压缩的情况,直接进行level 1压缩,并检查是否需要进行level 2压缩(进入侧分支)
  675. # Level 1 压缩:Goal 完成压缩
  676. if config.goal_compression != "none" and self.trace_store and goal_tree:
  677. if head_seq > 0:
  678. main_path_msgs = await self.trace_store.get_main_path_messages(
  679. trace_id, head_seq
  680. )
  681. compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
  682. if len(compressed_msgs) < len(main_path_msgs):
  683. logger.info(
  684. "Level 1 压缩: %d -> %d 条消息",
  685. len(main_path_msgs), len(compressed_msgs),
  686. )
  687. history = [msg.to_llm_dict() for msg in compressed_msgs]
  688. else:
  689. logger.info(
  690. "Level 1 压缩: 无可过滤消息 (%d 条全部保留)",
  691. len(main_path_msgs),
  692. )
  693. elif needs_compression:
  694. logger.warning(
  695. "消息数 (%d) 或 token 数 (%d) 超过阈值,但无法执行 Level 1 压缩(缺少 store 或 goal_tree,或 goal_compression=none)",
  696. msg_count, token_count,
  697. )
  698. # Level 2 压缩:检查 Level 1 后是否仍超阈值
  699. token_count_after = estimate_tokens(history)
  700. msg_count_after = len(history)
  701. needs_level2_by_tokens = token_count_after > max_tokens
  702. needs_level2_by_count = (
  703. compression_config.max_messages > 0 and
  704. msg_count_after > compression_config.max_messages
  705. )
  706. needs_level2 = needs_level2_by_tokens or needs_level2_by_count
  707. if needs_level2:
  708. logger.info(
  709. "Level 1 后仍超阈值 (消息数=%d/%d, token=%d/%d),需要进入压缩侧分支",
  710. msg_count_after, compression_config.max_messages, token_count_after, max_tokens,
  711. )
  712. # 如果还没有设置侧分支(说明没有启用知识提取),直接进入压缩
  713. if not config.force_side_branch:
  714. config.force_side_branch = ["compression"]
  715. # 返回标志,让主循环进入侧分支
  716. return history, head_seq, sequence, True
  717. # 压缩完成后,输出最终发给模型的消息列表
  718. logger.info("Level 1 压缩完成,发送给模型的消息列表:")
  719. for idx, msg in enumerate(history):
  720. role = msg.get("role", "unknown")
  721. content = msg.get("content", "")
  722. if isinstance(content, str):
  723. preview = content[:100] + ("..." if len(content) > 100 else "")
  724. elif isinstance(content, list):
  725. preview = f"[{len(content)} blocks]"
  726. else:
  727. preview = str(content)[:100]
  728. logger.info(f" [{idx}] {role}: {preview}")
  729. return history, head_seq, sequence, False
  730. async def _single_turn_compress(
  731. self,
  732. trace_id: str,
  733. history: List[Dict],
  734. goal_tree: Optional[GoalTree],
  735. config: RunConfig,
  736. ) -> str:
  737. """单次 LLM 调用生成压缩摘要,返回 summary 文本"""
  738. logger.info("执行单次 LLM 压缩")
  739. # 构建压缩 prompt(使用 SINGLE_TURN_PROMPT)
  740. from agent.core.prompts import build_single_turn_prompt
  741. goal_prompt = goal_tree.to_prompt(include_summary=True) if goal_tree else ""
  742. compress_prompt = build_single_turn_prompt(goal_prompt)
  743. compress_messages = list(history) + [
  744. {"role": "user", "content": compress_prompt}
  745. ]
  746. # 应用 Prompt Caching
  747. compress_messages = self._add_cache_control(
  748. compress_messages, config.model, config.enable_prompt_caching
  749. )
  750. # 单次 LLM 调用(无工具)
  751. result = await self.llm_call(
  752. messages=compress_messages,
  753. model=config.model,
  754. tools=[], # 不提供工具
  755. temperature=config.temperature,
  756. **config.extra_llm_params,
  757. )
  758. summary_text = result.get("content", "").strip()
  759. # 提取 [[SUMMARY]] 块
  760. if "[[SUMMARY]]" in summary_text:
  761. summary_text = summary_text[
  762. summary_text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
  763. ].strip()
  764. return summary_text
async def _agent_loop(
    self,
    trace: Trace,
    history: List[Dict],
    goal_tree: Optional[GoalTree],
    config: RunConfig,
    sequence: int,
) -> AsyncIterator[Union[Trace, Message]]:
    """ReAct loop: repeatedly call the LLM, execute its tool calls, and
    persist every message until the model stops calling tools or
    config.max_iterations is reached.

    Yields each persisted Message as it is produced, and finally the updated
    Trace. Also drives "side branches" — reflection / compression detours
    that run on the same loop but hang off the main message path via
    branch_type / branch_id and are later stitched out.

    NOTE(review): indentation below was reconstructed from a
    whitespace-mangled extract; nesting of store-guarded blocks follows the
    unguarded `self.trace_store` attribute accesses visible in the code —
    confirm against the original file.
    """
    trace_id = trace.trace_id
    tool_schemas = self._get_tool_schemas(config.tools)
    # Sequence of the current main-path head node (used as parent_sequence).
    head_seq = trace.head_sequence
    # Side-branch state (None = running on the main path).
    side_branch_ctx: Optional[SideBranchContext] = None
    # Resume an unfinished side branch if one was persisted (crash/stop recovery).
    if trace.context.get("active_side_branch"):
        side_branch_data = trace.context["active_side_branch"]
        branch_id = side_branch_data["branch_id"]
        start_sequence = side_branch_data["start_sequence"]
        # Reload the side-branch messages from the store (by sequence range).
        if self.trace_store:
            all_messages = await self.trace_store.get_trace_messages(trace_id)
            side_messages = [
                m for m in all_messages
                if m.sequence >= start_sequence
            ]
            # Rebuild the side-branch context.
            side_branch_ctx = SideBranchContext(
                type=side_branch_data["type"],
                branch_id=branch_id,
                start_head_seq=side_branch_data["start_head_seq"],
                start_sequence=side_branch_data["start_sequence"],
                start_history_length=0,  # recomputed below
                start_iteration=side_branch_data.get("start_iteration", 0),
                max_turns=side_branch_data.get("max_turns", config.side_branch_max_turns),
            )
            logger.info(
                f"恢复未完成的侧分支: {side_branch_ctx.type}, "
                f"max_turns={side_branch_ctx.max_turns}"
            )
            # Append the side-branch messages to the in-memory history.
            for m in side_messages:
                history.append(m.to_llm_dict())
            # Recompute start_history_length now that history is rebuilt.
            side_branch_ctx.start_history_length = len(history) - len(side_messages)
    break_after_side_branch = False  # break the main loop once the side branch exits
    for iteration in range(config.max_iterations):
        # Touch last_activity_at so watchers see the trace as actively running.
        if self.trace_store:
            await self.trace_store.update_trace(
                trace_id,
                last_activity_at=datetime.now()
            )
        # Check for a user cancel signal.
        cancel_event = self._cancel_events.get(trace_id)
        if cancel_event and cancel_event.is_set():
            logger.info(f"Trace {trace_id} stopped by user")
            if self.trace_store:
                await self.trace_store.update_trace(
                    trace_id,
                    status="stopped",
                    head_sequence=head_seq,
                    completed_at=datetime.now(),
                )
                # Broadcast the status change to the frontend (best effort).
                try:
                    from agent.trace.websocket import broadcast_trace_status_changed
                    await broadcast_trace_status_changed(trace_id, "stopped")
                except Exception:
                    pass
                trace_obj = await self.trace_store.get_trace(trace_id)
                if trace_obj:
                    yield trace_obj
            return
        # Context management (main path only).
        needs_enter_side_branch = False
        if not side_branch_ctx:
            # After a completion-reflection side branch, exit the main loop.
            if break_after_side_branch and not config.force_side_branch:
                break
            # Forced side branch (manual API trigger or auto-compression flow)?
            if config.force_side_branch:
                needs_enter_side_branch = True
                logger.info(f"强制进入侧分支: {config.force_side_branch}")
            else:
                # Normal context-management path.
                history, head_seq, sequence, needs_enter_side_branch = await self._manage_context_usage(
                    trace_id, history, goal_tree, config, sequence, head_seq
                )
        # Enter a side branch.
        if needs_enter_side_branch and not side_branch_ctx:
            # Pop the first side-branch type from the queue.
            if config.force_side_branch and isinstance(config.force_side_branch, list) and len(config.force_side_branch) > 0:
                branch_type = config.force_side_branch.pop(0)
                logger.info(f"从队列取出侧分支: {branch_type}, 剩余队列: {config.force_side_branch}")
            elif config.knowledge.enable_extraction:
                # Legacy single-value mode (force_side_branch was a string).
                branch_type = "reflection"
            else:
                # Auto-triggered: compression.
                branch_type = "compression"
            branch_id = f"{branch_type}_{uuid.uuid4().hex[:8]}"
            side_branch_ctx = SideBranchContext(
                type=branch_type,
                branch_id=branch_id,
                start_head_seq=head_seq,
                start_sequence=sequence,
                start_history_length=len(history),
                start_iteration=iteration,
                max_turns=config.side_branch_max_turns,
            )
            # Persist the side-branch state so it can be resumed after a crash.
            if self.trace_store:
                trace.context["active_side_branch"] = {
                    "type": side_branch_ctx.type,
                    "branch_id": side_branch_ctx.branch_id,
                    "start_head_seq": side_branch_ctx.start_head_seq,
                    "start_sequence": side_branch_ctx.start_sequence,
                    "start_iteration": side_branch_ctx.start_iteration,
                    "max_turns": side_branch_ctx.max_turns,
                    "started_at": datetime.now().isoformat(),
                }
                await self.trace_store.update_trace(
                    trace_id,
                    context=trace.context
                )
            # Append the side-branch prompt as a user message.
            if branch_type == "reflection":
                prompt = config.knowledge.get_reflect_prompt()
            else:  # compression
                from agent.trace.compaction import build_compression_prompt
                prompt = build_compression_prompt(goal_tree)
            branch_user_msg = Message.create(
                trace_id=trace_id,
                role="user",
                sequence=sequence,
                parent_sequence=head_seq,
                goal_id=goal_tree.current_id if goal_tree else None,
                branch_type=branch_type,
                branch_id=branch_id,
                content=prompt,
            )
            if self.trace_store:
                await self.trace_store.add_message(branch_user_msg)
            history.append(branch_user_msg.to_llm_dict())
            head_seq = sequence
            sequence += 1
            logger.info(f"进入侧分支: {branch_type}, branch_id={branch_id}")
            continue  # skip this turn; the side branch starts next iteration
        # Build the LLM messages (context injection).
        llm_messages = list(history)
        # Apply prompt-caching markers to the history.
        llm_messages = self._add_cache_control(
            llm_messages,
            config.model,
            config.enable_prompt_caching
        )
        # Call the LLM. Cancel is only checked after the call completes; an
        # in-flight call is never interrupted.
        result = await self.llm_call(
            messages=llm_messages,
            model=config.model,
            tools=tool_schemas,
            temperature=config.temperature,
            **config.extra_llm_params,
        )
        response_content = result.get("content", "")
        tool_calls = result.get("tool_calls")
        finish_reason = result.get("finish_reason")
        prompt_tokens = result.get("prompt_tokens", 0)
        completion_tokens = result.get("completion_tokens", 0)
        step_cost = result.get("cost", 0)
        cache_creation_tokens = result.get("cache_creation_tokens")
        cache_read_tokens = result.get("cache_read_tokens")
        # Periodic automatic context injection (main path only).
        if not side_branch_ctx and iteration % CONTEXT_INJECTION_INTERVAL == 0:
            # Did the model already call get_current_context itself?
            if tool_calls:
                has_context_call = any(
                    tc.get("function", {}).get("name") == "get_current_context"
                    for tc in tool_calls
                )
            else:
                has_context_call = False
                tool_calls = []
            if not has_context_call:
                # Manually append a get_current_context tool call.
                import uuid
                context_call_id = f"call_context_{uuid.uuid4().hex[:8]}"
                tool_calls.append({
                    "id": context_call_id,
                    "type": "function",
                    "function": {"name": "get_current_context", "arguments": "{}"}
                })
                logger.info(f"[周期性注入] 自动添加 get_current_context 工具调用 (iteration={iteration})")
        # Lazily create the root goal on demand (main path only).
        if not side_branch_ctx and goal_tree and not goal_tree.goals and tool_calls:
            has_goal_call = any(
                tc.get("function", {}).get("name") == "goal"
                for tc in tool_calls
            )
            logger.debug(f"[Auto Root Goal] Before tool execution: goal_tree.goals={len(goal_tree.goals)}, has_goal_call={has_goal_call}, tool_calls={[tc.get('function', {}).get('name') for tc in tool_calls]}")
            if not has_goal_call:
                mission = goal_tree.mission
                root_desc = mission[:200] if len(mission) > 200 else mission
                goal_tree.add_goals(
                    descriptions=[root_desc],
                    reasons=["系统自动创建:Agent 未显式创建目标"],
                    parent_id=None
                )
                if self.trace_store:
                    await self.trace_store.add_goal(trace_id, goal_tree.goals[0])
                    await self.trace_store.update_goal_tree(trace_id, goal_tree)
                logger.info(f"自动创建 root goal: {goal_tree.goals[0].id}(未自动 focus,等待模型决定)")
            else:
                logger.debug(f"[Auto Root Goal] 检测到 goal 工具调用,跳过自动创建")
        # Resolve the currently focused goal id.
        current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
        # Record the assistant Message (parent_sequence points at the current head).
        assistant_msg = Message.create(
            trace_id=trace_id,
            role="assistant",
            sequence=sequence,
            goal_id=current_goal_id,
            parent_sequence=head_seq if head_seq > 0 else None,
            branch_type=side_branch_ctx.type if side_branch_ctx else None,
            branch_id=side_branch_ctx.branch_id if side_branch_ctx else None,
            content={"text": response_content, "tool_calls": tool_calls},
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            cache_creation_tokens=cache_creation_tokens,
            cache_read_tokens=cache_read_tokens,
            finish_reason=finish_reason,
            cost=step_cost,
        )
        if self.trace_store:
            await self.trace_store.add_message(assistant_msg)
            # Record model usage for metrics/billing.
            await self.trace_store.record_model_usage(
                trace_id=trace_id,
                sequence=sequence,
                role="assistant",
                model=config.model,
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                cache_read_tokens=cache_read_tokens or 0,
            )
        # Side-branch messages are already persisted — no extra bookkeeping.
        yield assistant_msg
        head_seq = sequence
        sequence += 1
        # Decide whether an active side branch should exit.
        if side_branch_ctx:
            # Number of turns spent inside the branch so far.
            turns_in_branch = iteration - side_branch_ctx.start_iteration
            should_exit = turns_in_branch >= side_branch_ctx.max_turns or not tool_calls
            if turns_in_branch >= side_branch_ctx.max_turns:
                logger.warning(
                    f"侧分支 {side_branch_ctx.type} 达到最大轮次 "
                    f"{side_branch_ctx.max_turns},强制退出"
                )
            if should_exit and side_branch_ctx.type == "compression":
                # === Compression branch exit (timeout and normal completion) ===
                summary_text = ""
                # 1. Extract the summary from the current reply.
                if response_content:
                    if "[[SUMMARY]]" in response_content:
                        summary_text = response_content[
                            response_content.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
                        ].strip()
                    elif response_content.strip():
                        summary_text = response_content.strip()
                # 2. Fall back to the persisted branch messages (by sequence range).
                if not summary_text and self.trace_store:
                    all_messages = await self.trace_store.get_trace_messages(trace_id)
                    side_messages = [
                        m for m in all_messages
                        if m.sequence >= side_branch_ctx.start_sequence
                    ]
                    for msg in reversed(side_messages):
                        if msg.role == "assistant" and isinstance(msg.content, dict):
                            text = msg.content.get("text", "")
                            if "[[SUMMARY]]" in text:
                                summary_text = text[text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):].strip()
                                break
                            elif text:
                                summary_text = text
                                break
                # 3. Last resort: single-shot LLM compression.
                if not summary_text:
                    logger.warning("侧分支未生成有效 summary,fallback 到单次 LLM 压缩")
                    pre_branch_history = history[:side_branch_ctx.start_history_length]
                    summary_text = await self._single_turn_compress(
                        trace_id, pre_branch_history, goal_tree, config,
                    )
                # Create the main-path summary message and rebuild history.
                if summary_text:
                    from agent.core.prompts import build_summary_header
                    summary_content = build_summary_header(summary_text)
                    if goal_tree and goal_tree.goals:
                        goal_tree_detail = goal_tree.to_prompt(include_summary=True)
                        summary_content += f"\n\n## Current Plan\n\n{goal_tree_detail}"
                    # Use the first user message's sequence as the parent:
                    # get_main_path_messages walks the parent chain on resume,
                    # so pointing at first_user skips all compressed messages.
                    first_user_seq = None
                    if self.trace_store:
                        all_msgs = await self.trace_store.get_trace_messages(trace_id)
                        for m in all_msgs:
                            if m.role == "user":
                                first_user_seq = m.sequence
                                break
                    summary_msg = Message.create(
                        trace_id=trace_id,
                        role="user",
                        sequence=sequence,
                        parent_sequence=first_user_seq,
                        branch_type=None,
                        content=summary_content,
                    )
                    if self.trace_store:
                        await self.trace_store.add_message(summary_msg)
                    history = self._rebuild_history_after_compression(
                        history, summary_msg.to_llm_dict(), label="压缩侧分支"
                    )
                    head_seq = sequence
                    sequence += 1
                else:
                    logger.error("所有压缩方案均未生成有效 summary,跳过压缩")
                # Clean up branch state.
                trace.context.pop("active_side_branch", None)
                config.force_side_branch = None
                if self.trace_store:
                    await self.trace_store.update_trace(
                        trace_id, context=trace.context, head_sequence=head_seq,
                    )
                side_branch_ctx = None
                continue
            elif should_exit and side_branch_ctx.type == "reflection":
                # === Reflection branch exit (timeout and normal completion) ===
                logger.info("反思侧分支退出")
                # Restore the main path.
                if self.trace_store:
                    main_path_messages = await self.trace_store.get_main_path_messages(
                        trace_id, side_branch_ctx.start_head_seq
                    )
                    history = [m.to_llm_dict() for m in main_path_messages]
                head_seq = side_branch_ctx.start_head_seq
                # Clean up branch state.
                trace.context.pop("active_side_branch", None)
                if not config.force_side_branch or len(config.force_side_branch) == 0:
                    config.force_side_branch = None
                    logger.info("反思完成,队列为空")
                if self.trace_store:
                    await self.trace_store.update_trace(
                        trace_id, context=trace.context, head_sequence=head_seq,
                    )
                side_branch_ctx = None
                continue
        # Handle tool calls.
        # Truncation guard: finish_reason == "length" means the response was
        # cut off by max_tokens, so the tool-call arguments are likely
        # incomplete — do not execute them; tell the model to batch its work.
        if tool_calls and finish_reason == "length":
            logger.warning(
                "[Runner] 响应被 max_tokens 截断,跳过 %d 个不完整的 tool calls",
                len(tool_calls),
            )
            truncation_hint = TRUNCATION_HINT
            history.append({
                "role": "assistant",
                "content": response_content,
                "tool_calls": tool_calls,
            })
            # Return an error result for every truncated tool call.
            for tc in tool_calls:
                history.append({
                    "role": "tool",
                    "tool_call_id": tc["id"],
                    "content": truncation_hint,
                })
            continue
        if tool_calls and config.auto_execute_tools:
            history.append({
                "role": "assistant",
                "content": response_content,
                "tool_calls": tool_calls,
            })
            for tc in tool_calls:
                current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
                tool_name = tc["function"]["name"]
                tool_args = tc["function"]["arguments"]
                if isinstance(tool_args, str):
                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
                elif tool_args is None:
                    tool_args = {}
                # Inject default fields for the knowledge-management tools.
                if tool_name == "knowledge_save":
                    run_agent_id = config.agent_id or config.uid or "agent"
                    tool_args.setdefault("owner", config.knowledge.get_owner(run_agent_id))
                    if config.knowledge.default_tags:
                        existing_tags = tool_args.get("tags") or {}
                        merged_tags = {**config.knowledge.default_tags, **existing_tags}
                        tool_args["tags"] = merged_tags
                    if config.knowledge.default_scopes:
                        existing_scopes = tool_args.get("scopes") or []
                        tool_args["scopes"] = existing_scopes + config.knowledge.default_scopes
                elif tool_name == "knowledge_search":
                    if config.knowledge.default_search_types and "types" not in tool_args:
                        tool_args["types"] = config.knowledge.default_search_types
                    if config.knowledge.default_search_owner and "owner" not in tool_args:
                        tool_args["owner"] = config.knowledge.default_search_owner
                # Log the tool call (INFO level, args truncated to 100 chars).
                args_str = json.dumps(tool_args, ensure_ascii=False)
                args_display = args_str[:100] + "..." if len(args_str) > 100 else args_str
                logger.info(f"[Tool Call] {tool_name}({args_display})")
                tool_result = await self.tools.execute(
                    tool_name,
                    tool_args,
                    uid=config.uid or "",
                    context={
                        "store": self.trace_store,
                        "trace_id": trace_id,
                        "goal_id": current_goal_id,
                        "runner": self,
                        "goal_tree": goal_tree,
                        "knowledge_config": config.knowledge,
                        # Side-branch info for tools that care.
                        "side_branch": {
                            "type": side_branch_ctx.type,
                            "branch_id": side_branch_ctx.branch_id,
                            "is_side_branch": True,
                            "max_turns": side_branch_ctx.max_turns,
                        } if side_branch_ctx else None,
                    },
                )
                # For the goal tool, log the post-execution tree state.
                if tool_name == "goal" and goal_tree:
                    logger.debug(f"[Goal Tool] After execution: goal_tree.goals={len(goal_tree.goals)}, current_id={goal_tree.current_id}")
                # Track saved knowledge IDs per trace.
                if tool_name == "knowledge_save" and isinstance(tool_result, dict):
                    metadata = tool_result.get("metadata", {})
                    knowledge_id = metadata.get("knowledge_id")
                    if knowledge_id:
                        if trace_id not in self._saved_knowledge_ids:
                            self._saved_knowledge_ids[trace_id] = []
                        self._saved_knowledge_ids[trace_id].append(knowledge_id)
                        logger.info(f"[Knowledge Tracking] 记录保存的知识 ID: {knowledge_id}")
                # --- Multimodal tool feedback ---
                # execute() returns dict{"text","images","tool_usage"} or str;
                # normalize to the dict form.
                if isinstance(tool_result, str):
                    tool_result = {"text": tool_result}
                tool_text = tool_result.get("text", str(tool_result))
                tool_images = tool_result.get("images", [])
                tool_usage = tool_result.get("tool_usage")  # model usage reported by the tool
                # Build a multimodal message when images are present.
                if tool_images:
                    tool_result_text = tool_text
                    tool_content_for_llm = [{"type": "text", "text": tool_text}]
                    for img in tool_images:
                        if img.get("type") == "base64" and img.get("data"):
                            media_type = img.get("media_type", "image/png")
                            tool_content_for_llm.append({
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:{media_type};base64,{img['data']}"
                                }
                            })
                    img_count = len(tool_content_for_llm) - 1  # minus the text block
                    print(f"[Runner] 多模态工具反馈: tool={tool_name}, images={img_count}, text_len={len(tool_result_text)}")
                else:
                    tool_result_text = tool_text
                    tool_content_for_llm = tool_text
                tool_msg = Message.create(
                    trace_id=trace_id,
                    role="tool",
                    sequence=sequence,
                    goal_id=current_goal_id,
                    parent_sequence=head_seq,
                    tool_call_id=tc["id"],
                    branch_type=side_branch_ctx.type if side_branch_ctx else None,
                    branch_id=side_branch_ctx.branch_id if side_branch_ctx else None,
                    # Persist the full content: a list (with image_url parts)
                    # when images are present, a plain string otherwise.
                    content={"tool_name": tool_name, "result": tool_content_for_llm},
                )
                if self.trace_store:
                    await self.trace_store.add_message(tool_msg)
                    # Record model usage reported by the tool itself.
                    if tool_usage:
                        await self.trace_store.record_model_usage(
                            trace_id=trace_id,
                            sequence=sequence,
                            role="tool",
                            tool_name=tool_name,
                            model=tool_usage.get("model"),
                            prompt_tokens=tool_usage.get("prompt_tokens", 0),
                            completion_tokens=tool_usage.get("completion_tokens", 0),
                            cache_read_tokens=tool_usage.get("cache_read_tokens", 0),
                        )
                    # Save the first screenshot as a same-named PNG file.
                    if tool_images:
                        import base64 as b64mod
                        for img in tool_images:
                            if img.get("data"):
                                png_path = self.trace_store._get_messages_dir(trace_id) / f"{tool_msg.message_id}.png"
                                png_path.write_bytes(b64mod.b64decode(img["data"]))
                                print(f"[Runner] 截图已保存: {png_path.name}")
                                break  # only the first image
                # Side-branch tool_msg is already persisted — nothing extra to do.
                yield tool_msg
                head_seq = sequence
                sequence += 1
                history.append({
                    "role": "tool",
                    "tool_call_id": tc["id"],
                    "name": tool_name,
                    "content": tool_content_for_llm,
                })
            # on_complete mode: compress a goal's messages right after goal(done=...).
            if (
                not side_branch_ctx
                and config.goal_compression == "on_complete"
                and self.trace_store
                and goal_tree
            ):
                has_goal_done = False
                for tc in tool_calls:
                    if tc["function"]["name"] != "goal":
                        continue
                    try:
                        raw = tc["function"]["arguments"]
                        args = json.loads(raw) if isinstance(raw, str) and raw.strip() else {}
                    except (json.JSONDecodeError, TypeError):
                        args = {}
                    if args.get("done") is not None:
                        has_goal_done = True
                        break
                if has_goal_done:
                    main_path_msgs = await self.trace_store.get_main_path_messages(
                        trace_id, head_seq
                    )
                    compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
                    if len(compressed_msgs) < len(main_path_msgs):
                        logger.info(
                            "on_complete 压缩: %d -> %d 条消息",
                            len(main_path_msgs), len(compressed_msgs),
                        )
                        history = [msg.to_llm_dict() for msg in compressed_msgs]
            continue  # keep looping after tool execution
        # No tool calls.
        # Inside a side branch this case was already handled above.
        # On the main path no tool calls means the task is finished; optionally
        # run a post-completion reflection side branch first.
        if not side_branch_ctx and config.knowledge.enable_completion_extraction and not break_after_side_branch:
            config.force_side_branch = ["reflection"]
            break_after_side_branch = True
            logger.info("任务完成,进入完成后反思侧分支")
            continue
        break
    # Drop per-trace tracking state.
    self._context_warned.pop(trace_id, None)
    self._context_usage.pop(trace_id, None)
    self._saved_knowledge_ids.pop(trace_id, None)
    # Update head_sequence and mark the Trace completed.
    if self.trace_store:
        await self.trace_store.update_trace(
            trace_id,
            status="completed",
            head_sequence=head_seq,
            completed_at=datetime.now(),
        )
        trace_obj = await self.trace_store.get_trace(trace_id)
        if trace_obj:
            yield trace_obj
  1339. # ===== 压缩辅助方法 =====
  1340. def _rebuild_history_after_compression(
  1341. self,
  1342. history: List[Dict],
  1343. summary_msg_dict: Dict,
  1344. label: str = "压缩",
  1345. ) -> List[Dict]:
  1346. """
  1347. 压缩后重建 history:system prompt + 第一条 user message + summary
  1348. Args:
  1349. history: 压缩前的 history
  1350. summary_msg_dict: summary 消息的 LLM dict
  1351. label: 日志标签
  1352. Returns:
  1353. 新的 history
  1354. """
  1355. system_msg = None
  1356. first_user_msg = None
  1357. for msg in history:
  1358. if msg.get("role") == "system" and not system_msg:
  1359. system_msg = msg
  1360. elif msg.get("role") == "user" and not first_user_msg:
  1361. first_user_msg = msg
  1362. if system_msg and first_user_msg:
  1363. break
  1364. new_history = []
  1365. if system_msg:
  1366. new_history.append(system_msg)
  1367. if first_user_msg:
  1368. new_history.append(first_user_msg)
  1369. new_history.append(summary_msg_dict)
  1370. logger.info(f"{label}完成: {len(history)} → {len(new_history)} 条消息")
  1371. for idx, msg in enumerate(new_history):
  1372. role = msg.get("role", "unknown")
  1373. content = msg.get("content", "")
  1374. if isinstance(content, str):
  1375. preview = content
  1376. elif isinstance(content, list):
  1377. preview = f"[{len(content)} blocks]"
  1378. else:
  1379. preview = str(content)
  1380. logger.info(f" {label}后[{idx}] {role}: {preview}")
  1381. <<<<<<< HEAD
  1382. return new_history
  1383. =======
  1384. tool_calls = reflect_result.get("tool_calls") or []
  1385. if not tool_calls:
  1386. logger.info("反思阶段无经验保存 (source=%s)", source_name)
  1387. return
  1388. saved_count = 0
  1389. for tc in tool_calls:
  1390. tool_name = tc.get("function", {}).get("name")
  1391. if tool_name != "knowledge_save":
  1392. continue
  1393. tool_args = tc.get("function", {}).get("arguments") or {}
  1394. if isinstance(tool_args, str):
  1395. tool_args = json.loads(tool_args) if tool_args.strip() else {}
  1396. # 注入来源信息(LLM 不需要填写这些字段)
  1397. tool_args.setdefault("source_name", source_name)
  1398. tool_args.setdefault("source_category", "exp")
  1399. tool_args.setdefault("message_id", trace_id)
  1400. # 注入知识管理默认字段
  1401. run_agent_id = config.agent_id or config.uid or "agent"
  1402. tool_args.setdefault("owner", config.knowledge.get_owner(run_agent_id))
  1403. if config.knowledge.default_tags:
  1404. existing_tags = tool_args.get("tags") or {}
  1405. merged_tags = {**config.knowledge.default_tags, **existing_tags}
  1406. tool_args["tags"] = merged_tags
  1407. if config.knowledge.default_scopes:
  1408. tool_args.setdefault("scopes", config.knowledge.default_scopes)
  1409. try:
  1410. await self.tools.execute(
  1411. "knowledge_save",
  1412. tool_args,
  1413. uid=config.uid or "",
  1414. context={"store": self.trace_store, "trace_id": trace_id},
  1415. )
  1416. saved_count += 1
  1417. except Exception as e:
  1418. logger.warning("保存经验失败: %s", e)
  1419. logger.info("已提取并保存 %d 条经验 (source=%s)", saved_count, source_name)
  1420. except Exception as e:
  1421. logger.error("知识反思提取失败 (source=%s): %s", source_name, e)
  1422. async def _extract_knowledge_on_completion(
  1423. self,
  1424. trace_id: str,
  1425. history: List[Dict],
  1426. config: RunConfig,
  1427. ) -> None:
  1428. """任务完成后执行全局复盘,提取经验保存到知识库。"""
  1429. logger.info("任务完成后复盘提取: trace=%s", trace_id)
  1430. await self._run_reflect(
  1431. trace_id, history, config,
  1432. reflect_prompt=config.knowledge.get_completion_reflect_prompt(),
  1433. source_name="completion_reflection",
  1434. )
  1435. >>>>>>> 0a8d3f1 (修改远程库地址和新流程)
  1436. # ===== 回溯(Rewind)=====
  1437. async def _rewind(
  1438. self,
  1439. trace_id: str,
  1440. after_sequence: int,
  1441. goal_tree: Optional[GoalTree],
  1442. ) -> int:
  1443. """
  1444. 执行回溯:快照 GoalTree,重建干净树,设置 head_sequence
  1445. 新消息的 parent_sequence 将指向 rewind 点,旧消息通过树结构自然脱离主路径。
  1446. Returns:
  1447. 下一个可用的 sequence 号
  1448. """
  1449. if not self.trace_store:
  1450. raise ValueError("trace_store required for rewind")
  1451. # 1. 加载所有 messages(用于 safe cutoff 和 max sequence)
  1452. all_messages = await self.trace_store.get_trace_messages(trace_id)
  1453. if not all_messages:
  1454. return 1
  1455. # 2. 找到安全截断点(确保不截断在 tool_call 和 tool response 之间)
  1456. cutoff = self._find_safe_cutoff(all_messages, after_sequence)
  1457. # 3. 快照并重建 GoalTree
  1458. if goal_tree:
  1459. # 获取截断点消息的 created_at 作为时间界限
  1460. cutoff_msg = None
  1461. for msg in all_messages:
  1462. if msg.sequence == cutoff:
  1463. cutoff_msg = msg
  1464. break
  1465. cutoff_time = cutoff_msg.created_at if cutoff_msg else datetime.now()
  1466. # 快照到 events(含 head_sequence 供前端感知分支切换)
  1467. await self.trace_store.append_event(trace_id, "rewind", {
  1468. "after_sequence": cutoff,
  1469. "head_sequence": cutoff,
  1470. "goal_tree_snapshot": goal_tree.to_dict(),
  1471. })
  1472. # 按时间重建干净的 GoalTree
  1473. new_tree = goal_tree.rebuild_for_rewind(cutoff_time)
  1474. await self.trace_store.update_goal_tree(trace_id, new_tree)
  1475. # 更新内存中的引用
  1476. goal_tree.goals = new_tree.goals
  1477. goal_tree.current_id = new_tree.current_id
  1478. # 4. 更新 head_sequence 到 rewind 点
  1479. await self.trace_store.update_trace(trace_id, head_sequence=cutoff)
  1480. # 5. 返回 next sequence(全局递增,不复用)
  1481. max_seq = max((m.sequence for m in all_messages), default=0)
  1482. return max_seq + 1
  1483. def _find_safe_cutoff(self, messages: List[Message], after_sequence: int) -> int:
  1484. """
  1485. 找到安全的截断点。
  1486. 如果 after_sequence 指向一条带 tool_calls 的 assistant message,
  1487. 则自动扩展到其所有对应的 tool response 之后。
  1488. """
  1489. cutoff = after_sequence
  1490. # 找到 after_sequence 对应的 message
  1491. target_msg = None
  1492. for msg in messages:
  1493. if msg.sequence == after_sequence:
  1494. target_msg = msg
  1495. break
  1496. if not target_msg:
  1497. return cutoff
  1498. # 如果是 assistant 且有 tool_calls,找到所有对应的 tool responses
  1499. if target_msg.role == "assistant":
  1500. content = target_msg.content
  1501. if isinstance(content, dict) and content.get("tool_calls"):
  1502. tool_call_ids = set()
  1503. for tc in content["tool_calls"]:
  1504. if isinstance(tc, dict) and tc.get("id"):
  1505. tool_call_ids.add(tc["id"])
  1506. # 找到这些 tool_call 对应的 tool messages
  1507. for msg in messages:
  1508. if (msg.role == "tool" and msg.tool_call_id
  1509. and msg.tool_call_id in tool_call_ids):
  1510. cutoff = max(cutoff, msg.sequence)
  1511. return cutoff
  1512. async def _heal_orphaned_tool_calls(
  1513. self,
  1514. messages: List[Message],
  1515. trace_id: str,
  1516. goal_tree: Optional[GoalTree],
  1517. sequence: int,
  1518. ) -> tuple:
  1519. """
  1520. 检测并修复消息历史中的 orphaned tool_calls。
  1521. 当 agent 被 stop/crash 中断时,可能有 assistant 的 tool_calls 没有对应的
  1522. tool results(包括多 tool_call 部分完成的情况)。直接发给 LLM 会导致 400。
  1523. 修复策略:为每个缺失的 tool_result 插入合成的"中断通知"消息,而非裁剪。
  1524. - 普通工具:简短中断提示
  1525. - agent/evaluate:包含 sub_trace_id、执行统计、continue_from 指引
  1526. 合成消息持久化到 store,确保幂等(下次续跑不再触发)。
  1527. Returns:
  1528. (healed_messages, next_sequence)
  1529. """
  1530. if not messages:
  1531. return messages, sequence
  1532. # 收集所有 tool_call IDs → (assistant_msg, tool_call_dict)
  1533. tc_map: Dict[str, tuple] = {}
  1534. result_ids: set = set()
  1535. for msg in messages:
  1536. if msg.role == "assistant":
  1537. content = msg.content
  1538. if isinstance(content, dict) and content.get("tool_calls"):
  1539. for tc in content["tool_calls"]:
  1540. tc_id = tc.get("id")
  1541. if tc_id:
  1542. tc_map[tc_id] = (msg, tc)
  1543. elif msg.role == "tool" and msg.tool_call_id:
  1544. result_ids.add(msg.tool_call_id)
  1545. orphaned_ids = [tc_id for tc_id in tc_map if tc_id not in result_ids]
  1546. if not orphaned_ids:
  1547. return messages, sequence
  1548. logger.info(
  1549. "检测到 %d 个 orphaned tool_calls,生成合成中断通知",
  1550. len(orphaned_ids),
  1551. )
  1552. healed = list(messages)
  1553. head_seq = messages[-1].sequence
  1554. for tc_id in orphaned_ids:
  1555. assistant_msg, tc = tc_map[tc_id]
  1556. tool_name = tc.get("function", {}).get("name", "unknown")
  1557. if tool_name in ("agent", "evaluate"):
  1558. result_text = self._build_agent_interrupted_result(
  1559. tc, goal_tree, assistant_msg,
  1560. )
  1561. else:
  1562. result_text = build_tool_interrupted_message(tool_name)
  1563. synthetic_msg = Message.create(
  1564. trace_id=trace_id,
  1565. role="tool",
  1566. sequence=sequence,
  1567. goal_id=assistant_msg.goal_id,
  1568. parent_sequence=head_seq,
  1569. tool_call_id=tc_id,
  1570. content={"tool_name": tool_name, "result": result_text},
  1571. )
  1572. if self.trace_store:
  1573. await self.trace_store.add_message(synthetic_msg)
  1574. healed.append(synthetic_msg)
  1575. head_seq = sequence
  1576. sequence += 1
  1577. # 更新 trace head/last sequence
  1578. if self.trace_store:
  1579. await self.trace_store.update_trace(
  1580. trace_id,
  1581. head_sequence=head_seq,
  1582. last_sequence=max(head_seq, sequence - 1),
  1583. )
  1584. return healed, sequence
  1585. def _build_agent_interrupted_result(
  1586. self,
  1587. tc: Dict,
  1588. goal_tree: Optional[GoalTree],
  1589. assistant_msg: Message,
  1590. ) -> str:
  1591. """为中断的 agent/evaluate 工具调用构建合成结果(对齐正常返回值格式)"""
  1592. args_str = tc.get("function", {}).get("arguments", "{}")
  1593. try:
  1594. args = json.loads(args_str) if isinstance(args_str, str) else args_str
  1595. except json.JSONDecodeError:
  1596. args = {}
  1597. task = args.get("task", "未知任务")
  1598. if isinstance(task, list):
  1599. task = "; ".join(task)
  1600. tool_name = tc.get("function", {}).get("name", "agent")
  1601. mode = "evaluate" if tool_name == "evaluate" else "delegate"
  1602. # 从 goal_tree 查找 sub_trace 信息
  1603. sub_trace_id = None
  1604. stats = None
  1605. if goal_tree and assistant_msg.goal_id:
  1606. goal = goal_tree.find(assistant_msg.goal_id)
  1607. if goal and goal.sub_trace_ids:
  1608. first = goal.sub_trace_ids[0]
  1609. if isinstance(first, dict):
  1610. sub_trace_id = first.get("trace_id")
  1611. elif isinstance(first, str):
  1612. sub_trace_id = first
  1613. if goal.cumulative_stats:
  1614. s = goal.cumulative_stats
  1615. if s.message_count > 0:
  1616. stats = {
  1617. "message_count": s.message_count,
  1618. "total_tokens": s.total_tokens,
  1619. "total_cost": round(s.total_cost, 4),
  1620. }
  1621. result: Dict[str, Any] = {
  1622. "mode": mode,
  1623. "status": "interrupted",
  1624. "summary": AGENT_INTERRUPTED_SUMMARY,
  1625. "task": task,
  1626. }
  1627. if sub_trace_id:
  1628. result["sub_trace_id"] = sub_trace_id
  1629. result["hint"] = build_agent_continue_hint(sub_trace_id)
  1630. if stats:
  1631. result["stats"] = stats
  1632. return json.dumps(result, ensure_ascii=False, indent=2)
  1633. # ===== 上下文注入 =====
  1634. def _build_context_injection(
  1635. self,
  1636. trace: Trace,
  1637. goal_tree: Optional[GoalTree],
  1638. ) -> str:
  1639. """构建周期性注入的上下文(GoalTree + Active Collaborators + Focus 提醒)"""
  1640. parts = []
  1641. # GoalTree
  1642. if goal_tree and goal_tree.goals:
  1643. parts.append(f"## Current Plan\n\n{goal_tree.to_prompt()}")
  1644. if goal_tree.current_id:
  1645. # 检测 focus 在有子节点的父目标上:提醒模型 focus 到具体子目标
  1646. children = goal_tree.get_children(goal_tree.current_id)
  1647. pending_children = [c for c in children if c.status in ("pending", "in_progress")]
  1648. if pending_children:
  1649. child_ids = ", ".join(
  1650. goal_tree._generate_display_id(c) for c in pending_children[:3]
  1651. )
  1652. parts.append(
  1653. f"**提醒**:当前焦点在父目标上,建议用 `goal(focus=\"...\")` "
  1654. f"切换到具体子目标(如 {child_ids})再执行。"
  1655. )
  1656. else:
  1657. # 无焦点:提醒模型 focus
  1658. parts.append(
  1659. "**提醒**:当前没有焦点目标。请用 `goal(focus=\"...\")` 选择一个目标开始执行。"
  1660. )
  1661. # Active Collaborators
  1662. collaborators = trace.context.get("collaborators", [])
  1663. if collaborators:
  1664. lines = ["## Active Collaborators"]
  1665. for c in collaborators:
  1666. status_str = c.get("status", "unknown")
  1667. ctype = c.get("type", "agent")
  1668. summary = c.get("summary", "")
  1669. name = c.get("name", "unnamed")
  1670. lines.append(f"- {name} [{ctype}, {status_str}]: {summary}")
  1671. parts.append("\n".join(lines))
  1672. return "\n\n".join(parts)
  1673. # ===== 辅助方法 =====
  1674. def _add_cache_control(
  1675. self,
  1676. messages: List[Dict],
  1677. model: str,
  1678. enable: bool
  1679. ) -> List[Dict]:
  1680. """
  1681. 为支持的模型添加 Prompt Caching 标记
  1682. 策略:固定位置 + 延迟查找
  1683. 1. system message 添加缓存(如果足够长)
  1684. 2. 固定位置缓存点(20, 40, 60, 80),确保每个缓存点间隔 >= 1024 tokens
  1685. 3. 最多使用 4 个缓存点(含 system)
  1686. Args:
  1687. messages: 原始消息列表
  1688. model: 模型名称
  1689. enable: 是否启用缓存
  1690. Returns:
  1691. 添加了 cache_control 的消息列表(深拷贝)
  1692. """
  1693. if not enable:
  1694. return messages
  1695. # 只对 Claude 模型启用
  1696. if "claude" not in model.lower():
  1697. return messages
  1698. # 深拷贝避免修改原始数据
  1699. import copy
  1700. messages = copy.deepcopy(messages)
  1701. # 策略 1: 为 system message 添加缓存
  1702. system_cached = False
  1703. for msg in messages:
  1704. if msg.get("role") == "system":
  1705. content = msg.get("content", "")
  1706. if isinstance(content, str) and len(content) > 1000:
  1707. msg["content"] = [{
  1708. "type": "text",
  1709. "text": content,
  1710. "cache_control": {"type": "ephemeral"}
  1711. }]
  1712. system_cached = True
  1713. logger.debug(f"[Cache] 为 system message 添加缓存标记 (len={len(content)})")
  1714. break
  1715. # 策略 2: 固定位置缓存点
  1716. CACHE_INTERVAL = 20
  1717. MAX_POINTS = 3 if system_cached else 4
  1718. MIN_TOKENS = 1024
  1719. AVG_TOKENS_PER_MSG = 70
  1720. total_msgs = len(messages)
  1721. if total_msgs == 0:
  1722. return messages
  1723. cache_positions = []
  1724. last_cache_pos = 0
  1725. for i in range(1, MAX_POINTS + 1):
  1726. target_pos = i * CACHE_INTERVAL - 1 # 19, 39, 59, 79
  1727. if target_pos >= total_msgs:
  1728. break
  1729. # 从目标位置开始查找合适的 user/assistant 消息
  1730. for j in range(target_pos, total_msgs):
  1731. msg = messages[j]
  1732. if msg.get("role") not in ("user", "assistant"):
  1733. continue
  1734. content = msg.get("content", "")
  1735. if not content:
  1736. continue
  1737. # 检查 content 是否非空
  1738. is_valid = False
  1739. if isinstance(content, str):
  1740. is_valid = len(content) > 0
  1741. elif isinstance(content, list):
  1742. is_valid = any(
  1743. isinstance(block, dict) and
  1744. block.get("type") == "text" and
  1745. len(block.get("text", "")) > 0
  1746. for block in content
  1747. )
  1748. if not is_valid:
  1749. continue
  1750. # 检查 token 距离
  1751. msg_count = j - last_cache_pos
  1752. estimated_tokens = msg_count * AVG_TOKENS_PER_MSG
  1753. if estimated_tokens >= MIN_TOKENS:
  1754. cache_positions.append(j)
  1755. last_cache_pos = j
  1756. logger.debug(f"[Cache] 在位置 {j} 添加缓存点 (估算 {estimated_tokens} tokens)")
  1757. break
  1758. # 应用缓存标记
  1759. for idx in cache_positions:
  1760. msg = messages[idx]
  1761. content = msg.get("content", "")
  1762. if isinstance(content, str):
  1763. msg["content"] = [{
  1764. "type": "text",
  1765. "text": content,
  1766. "cache_control": {"type": "ephemeral"}
  1767. }]
  1768. logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
  1769. elif isinstance(content, list):
  1770. # 在最后一个 text block 添加 cache_control
  1771. for block in reversed(content):
  1772. if isinstance(block, dict) and block.get("type") == "text":
  1773. block["cache_control"] = {"type": "ephemeral"}
  1774. logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
  1775. break
  1776. logger.debug(
  1777. f"[Cache] 总消息: {total_msgs}, "
  1778. f"缓存点: {len(cache_positions)} at {cache_positions}"
  1779. )
  1780. return messages
  1781. def _get_tool_schemas(self, tools: Optional[List[str]]) -> List[Dict]:
  1782. """
  1783. 获取工具 Schema
  1784. - tools=None: 使用 registry 中全部已注册工具(含内置 + 外部注册的)
  1785. - tools=["a", "b"]: 在 BUILTIN_TOOLS 基础上追加指定工具
  1786. """
  1787. if tools is None:
  1788. # 全部已注册工具
  1789. tool_names = self.tools.get_tool_names()
  1790. else:
  1791. # BUILTIN_TOOLS + 显式指定的额外工具
  1792. tool_names = BUILTIN_TOOLS.copy()
  1793. for t in tools:
  1794. if t not in tool_names:
  1795. tool_names.append(t)
  1796. return self.tools.get_schemas(tool_names)
  1797. # 默认 system prompt 前缀(当 config.system_prompt 和前端都未提供 system message 时使用)
  1798. # 注意:此常量已迁移到 agent.core.prompts,这里保留引用以保持向后兼容
  1799. async def _build_system_prompt(self, config: RunConfig, base_prompt: Optional[str] = None) -> Optional[str]:
  1800. """构建 system prompt(注入 skills)
  1801. 优先级:
  1802. 1. config.skills 显式指定 → 按名称过滤
  1803. 2. config.skills 为 None → 查 preset 的默认 skills 列表
  1804. 3. preset 也无 skills(None)→ 加载全部(向后兼容)
  1805. Args:
  1806. base_prompt: 已有 system 内容(来自消息或 config.system_prompt),
  1807. None 时使用 config.system_prompt
  1808. """
  1809. from agent.core.presets import AGENT_PRESETS
  1810. system_prompt = base_prompt if base_prompt is not None else config.system_prompt
  1811. # 确定要加载哪些 skills
  1812. skills_filter: Optional[List[str]] = config.skills
  1813. if skills_filter is None:
  1814. preset = AGENT_PRESETS.get(config.agent_type)
  1815. if preset is not None:
  1816. skills_filter = preset.skills # 可能仍为 None(加载全部)
  1817. # 加载并过滤
  1818. all_skills = load_skills_from_dir(self.skills_dir)
  1819. if skills_filter is not None:
  1820. skills = [s for s in all_skills if s.name in skills_filter]
  1821. else:
  1822. skills = all_skills
  1823. skills_text = self._format_skills(skills) if skills else ""
  1824. if system_prompt:
  1825. if skills_text:
  1826. system_prompt += f"\n\n## Skills\n{skills_text}"
  1827. else:
  1828. system_prompt = DEFAULT_SYSTEM_PREFIX
  1829. if skills_text:
  1830. system_prompt += f"\n\n## Skills\n{skills_text}"
  1831. return system_prompt
  1832. async def _generate_task_name(self, messages: List[Dict]) -> str:
  1833. """生成任务名称:优先使用 utility_llm,fallback 到文本截取"""
  1834. # 提取 messages 中的文本内容
  1835. text_parts = []
  1836. for msg in messages:
  1837. content = msg.get("content", "")
  1838. if isinstance(content, str):
  1839. text_parts.append(content)
  1840. elif isinstance(content, list):
  1841. for part in content:
  1842. if isinstance(part, dict) and part.get("type") == "text":
  1843. text_parts.append(part.get("text", ""))
  1844. raw_text = " ".join(text_parts).strip()
  1845. if not raw_text:
  1846. return TASK_NAME_FALLBACK
  1847. # 尝试使用 utility_llm 生成标题
  1848. if self.utility_llm_call:
  1849. try:
  1850. result = await self.utility_llm_call(
  1851. messages=[
  1852. {"role": "system", "content": TASK_NAME_GENERATION_SYSTEM_PROMPT},
  1853. {"role": "user", "content": raw_text[:2000]},
  1854. ],
  1855. model="gpt-4o-mini", # 使用便宜模型
  1856. )
  1857. title = result.get("content", "").strip()
  1858. if title and len(title) < 100:
  1859. return title
  1860. except Exception:
  1861. pass
  1862. # Fallback: 截取前 50 字符
  1863. return raw_text[:50] + ("..." if len(raw_text) > 50 else "")
  1864. def _format_skills(self, skills: List[Skill]) -> str:
  1865. if not skills:
  1866. return ""
  1867. return "\n\n".join(s.to_prompt_text() for s in skills)