runner.py
  1. """
  2. Agent Runner - Agent 执行引擎
  3. 核心职责:
  4. 1. 执行 Agent 任务(循环调用 LLM + 工具)
  5. 2. 记录执行轨迹(Trace + Messages + GoalTree)
  6. 3. 检索和注入记忆(Experience + Skill)
  7. 4. 管理执行计划(GoalTree)
  8. 5. 支持续跑(continue)和回溯重跑(rewind)
  9. 参数分层:
  10. - Infrastructure: AgentRunner 构造时设置(trace_store, llm_call 等)
  11. - RunConfig: 每次 run 时指定(model, trace_id, after_sequence 等)
  12. - Messages: OpenAI SDK 格式的任务消息
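使用示例(最小示意;其中 store、my_llm_call 为调用方提供的假设对象,并非本模块内定义):

    runner = AgentRunner(trace_store=store, llm_call=my_llm_call)
    config = RunConfig(model="gpt-4o", max_iterations=50)
    messages = [{"role": "user", "content": "整理一下仓库目录结构"}]

    async for event in runner.run(messages, config):
        ...  # event 为 Trace(状态变化)或 Message(执行步骤)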
  13. """
  14. import asyncio
  15. import json
  16. import logging
  17. import os
  18. import uuid
  19. from dataclasses import dataclass, field
  20. from datetime import datetime
  21. from typing import AsyncIterator, Optional, Dict, Any, List, Callable, Literal, Tuple, Union
  22. from agent.trace.models import Trace, Message
  23. from agent.trace.protocols import TraceStore
  24. from agent.trace.goal_models import GoalTree
  25. from agent.tools.builtin.experience import _get_structured_experiences, _batch_update_experiences
  26. from agent.trace.compaction import (
  27. CompressionConfig,
  28. filter_by_goal_status,
  29. estimate_tokens,
  30. needs_level2_compression,
  31. build_compression_prompt,
  32. build_reflect_prompt,
  33. )
  34. from agent.memory.models import Skill
  35. from agent.memory.protocols import MemoryStore, StateStore
  36. from agent.memory.skill_loader import load_skills_from_dir
  37. from agent.tools import ToolRegistry, get_tool_registry
  38. logger = logging.getLogger(__name__)
  39. # ===== 运行配置 =====
  40. @dataclass
  41. class RunConfig:
  42. """
  43. 运行参数 — 控制 Agent 如何执行
  44. 分为模型层参数(由上游 agent 或用户决定)和框架层参数(由系统注入)。
  45. """
  46. # --- 模型层参数 ---
  47. model: str = "gpt-4o"
  48. temperature: float = 0.3
  49. max_iterations: int = 200
  50. tools: Optional[List[str]] = None # None = 全部已注册工具
  51. # --- 框架层参数 ---
  52. agent_type: str = "default"
  53. uid: Optional[str] = None
  54. system_prompt: Optional[str] = None # None = 从 skills 自动构建
  55. skills: Optional[List[str]] = None # 注入 system prompt 的 skill 名称列表;None = 按 preset 决定
  56. enable_memory: bool = True
  57. auto_execute_tools: bool = True
  58. name: Optional[str] = None # 显示名称(空则由 utility_llm 自动生成)
  59. enable_prompt_caching: bool = True # 启用 Anthropic Prompt Caching(仅 Claude 模型有效)
  60. # --- Trace 控制 ---
  61. trace_id: Optional[str] = None # None = 新建
  62. parent_trace_id: Optional[str] = None # 子 Agent 专用
  63. parent_goal_id: Optional[str] = None
  64. # --- 续跑控制 ---
  65. after_sequence: Optional[int] = None # 从哪条消息后续跑(message sequence)
  66. # --- 额外 LLM 参数(传给 llm_call 的 **kwargs)---
  67. extra_llm_params: Dict[str, Any] = field(default_factory=dict)
  68. # 内置工具列表(始终自动加载)
  69. BUILTIN_TOOLS = [
  70. # 文件操作工具
  71. "read_file",
  72. "edit_file",
  73. "write_file",
  74. "glob_files",
  75. "grep_content",
  76. # 系统工具
  77. "bash_command",
  78. # 技能和目标管理
  79. "skill",
  80. "list_skills",
  81. "goal",
  82. "agent",
  83. "evaluate",
  84. # 搜索工具
  85. "search_posts",
  86. "get_experience",
  87. "get_search_suggestions",
  88. # 沙箱工具
  89. "sandbox_create_environment",
  90. "sandbox_run_shell",
  91. "sandbox_rebuild_with_ports",
  92. "sandbox_destroy_environment",
  93. # 浏览器工具
  94. "browser_navigate_to_url",
  95. "browser_search_web",
  96. "browser_go_back",
  97. "browser_wait",
  98. "browser_click_element",
  99. "browser_input_text",
  100. "browser_send_keys",
  101. "browser_upload_file",
  102. "browser_scroll_page",
  103. "browser_find_text",
  104. "browser_screenshot",
  105. "browser_switch_tab",
  106. "browser_close_tab",
  107. "browser_get_dropdown_options",
  108. "browser_select_dropdown_option",
  109. "browser_extract_content",
  110. "browser_read_long_content",
  111. "browser_download_direct_url",
  112. "browser_get_page_html",
  113. "browser_get_visual_selector_map",
  114. "browser_evaluate",
  115. "browser_ensure_login_with_cookies",
  116. "browser_wait_for_user_action",
  117. "browser_done",
  118. "browser_export_cookies",
  119. "browser_load_cookies"
  120. ]
  121. # ===== 向后兼容 =====
  122. @dataclass
  123. class AgentConfig:
  124. """[向后兼容] Agent 配置,新代码请使用 RunConfig"""
  125. agent_type: str = "default"
  126. max_iterations: int = 200
  127. enable_memory: bool = True
  128. auto_execute_tools: bool = True
  129. @dataclass
  130. class CallResult:
  131. """单次调用结果"""
  132. reply: str
  133. tool_calls: Optional[List[Dict]] = None
  134. trace_id: Optional[str] = None
  135. step_id: Optional[str] = None
  136. tokens: Optional[Dict[str, int]] = None
  137. cost: float = 0.0
  138. # ===== 执行引擎 =====
  139. CONTEXT_INJECTION_INTERVAL = 10 # 每 N 轮注入一次 GoalTree + Collaborators
  140. class AgentRunner:
  141. """
  142. Agent 执行引擎
  143. 支持三种运行模式(通过 RunConfig 区分):
  144. 1. 新建:trace_id=None
  145. 2. 续跑:trace_id=已有ID, after_sequence=None 或 == head
  146. 3. 回溯:trace_id=已有ID, after_sequence=N(N < head_sequence)
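示例(最小示意;store、llm_call 为调用方注入的假设对象,existing_id 为已有 trace 的 ID):

    runner = AgentRunner(trace_store=store, llm_call=llm_call)
    cfg_new = RunConfig(model="gpt-4o")                              # 1. 新建
    cfg_continue = RunConfig(trace_id=existing_id)                   # 2. 续跑
    cfg_rewind = RunConfig(trace_id=existing_id, after_sequence=12)  # 3. 回溯到 seq 12 之后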
  147. """
  148. def __init__(
  149. self,
  150. trace_store: Optional[TraceStore] = None,
  151. memory_store: Optional[MemoryStore] = None,
  152. state_store: Optional[StateStore] = None,
  153. tool_registry: Optional[ToolRegistry] = None,
  154. llm_call: Optional[Callable] = None,
  155. utility_llm_call: Optional[Callable] = None,
  156. embedding_call: Optional[Callable] = None,
  157. config: Optional[AgentConfig] = None,
  158. skills_dir: Optional[str] = None,
  159. experiences_path: Optional[str] = "./.cache/experiences.md",
  160. goal_tree: Optional[GoalTree] = None,
  161. debug: bool = False,
  162. ):
  163. """
  164. 初始化 AgentRunner
  165. Args:
  166. trace_store: Trace 存储
  167. memory_store: Memory 存储(可选)
  168. state_store: State 存储(可选)
  169. tool_registry: 工具注册表(默认使用全局注册表)
  170. llm_call: 主 LLM 调用函数
  171. embedding_call: 语义嵌入向量LLM
  172. utility_llm_call: 轻量 LLM(用于生成任务标题等),可选
  173. config: [向后兼容] AgentConfig
  174. skills_dir: Skills 目录路径
  175. experiences_path: 经验文件路径(默认 ./.cache/experiences.md)
  176. goal_tree: 初始 GoalTree(可选)
  177. debug: 保留参数(已废弃)
  178. """
  179. self.trace_store = trace_store
  180. self.memory_store = memory_store
  181. self.state_store = state_store
  182. self.tools = tool_registry or get_tool_registry()
  183. self.llm_call = llm_call
  184. self.embedding_call = embedding_call
  185. self.utility_llm_call = utility_llm_call
  186. self.config = config or AgentConfig()
  187. self.skills_dir = skills_dir
188. # 兜底到默认路径,确保 experiences_path 不为 None
189. self.experiences_path = experiences_path or "./.cache/experiences.md"
  190. self.goal_tree = goal_tree
  191. self.debug = debug
  192. self._cancel_events: Dict[str, asyncio.Event] = {} # trace_id → cancel event
  193. self.used_ex_ids: List[str] = [] # 当前运行中使用过的经验 ID
  194. # ===== 核心公开方法 =====
  195. async def run(
  196. self,
  197. messages: List[Dict],
  198. config: Optional[RunConfig] = None,
  199. ) -> AsyncIterator[Union[Trace, Message]]:
  200. """
  201. Agent 模式执行(核心方法)
  202. Args:
  203. messages: OpenAI SDK 格式的输入消息
  204. 新建: 初始任务消息 [{"role": "user", "content": "..."}]
  205. 续跑: 追加的新消息
  206. 回溯: 在插入点之后追加的消息
  207. config: 运行配置
  208. Yields:
  209. Union[Trace, Message]: Trace 对象(状态变化)或 Message 对象(执行过程)
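示例(示意;runner 已按上文注入 trace_store / llm_call):

    async for event in runner.run(
        [{"role": "user", "content": "分析一下日志"}],
        RunConfig(model="gpt-4o"),
    ):
        if isinstance(event, Trace):
            print("status:", event.status)
        else:  # Message
            print(event.role, event.sequence)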
  210. """
  211. if not self.llm_call:
  212. raise ValueError("llm_call function not provided")
  213. config = config or RunConfig()
  214. trace = None
  215. try:
  216. # Phase 1: PREPARE TRACE
  217. trace, goal_tree, sequence = await self._prepare_trace(messages, config)
  218. # 注册取消事件
  219. self._cancel_events[trace.trace_id] = asyncio.Event()
  220. yield trace
  221. # Phase 2: BUILD HISTORY
  222. history, sequence, created_messages, head_seq = await self._build_history(
  223. trace.trace_id, messages, goal_tree, config, sequence
  224. )
  225. # Update trace's head_sequence in memory
  226. trace.head_sequence = head_seq
  227. for msg in created_messages:
  228. yield msg
  229. # Phase 3: AGENT LOOP
  230. async for event in self._agent_loop(trace, history, goal_tree, config, sequence):
  231. yield event
  232. except Exception as e:
  233. logger.error(f"Agent run failed: {e}")
  234. tid = config.trace_id or (trace.trace_id if trace else None)
  235. if self.trace_store and tid:
  236. # 读取当前 last_sequence 作为 head_sequence,确保续跑时能加载完整历史
  237. current = await self.trace_store.get_trace(tid)
  238. head_seq = current.last_sequence if current else None
  239. await self.trace_store.update_trace(
  240. tid,
  241. status="failed",
  242. head_sequence=head_seq,
  243. error_message=str(e),
  244. completed_at=datetime.now()
  245. )
  246. trace_obj = await self.trace_store.get_trace(tid)
  247. if trace_obj:
  248. yield trace_obj
  249. raise
  250. finally:
  251. # 清理取消事件
  252. if trace:
  253. self._cancel_events.pop(trace.trace_id, None)
  254. async def run_result(
  255. self,
  256. messages: List[Dict],
  257. config: Optional[RunConfig] = None,
  258. on_event: Optional[Callable] = None,
  259. ) -> Dict[str, Any]:
  260. """
  261. 结果模式 — 消费 run(),返回结构化结果。
  262. 主要用于 agent/evaluate 工具内部。
  263. Args:
  264. on_event: 可选回调,每个 Trace/Message 事件触发一次,用于实时输出子 Agent 执行过程。
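示例(示意):

    result = await runner.run_result(
        [{"role": "user", "content": "检查一下构建结果"}],
        RunConfig(model="gpt-4o"),
        on_event=lambda ev: print(type(ev).__name__),
    )
    # result 形如 {"status", "summary", "trace_id", "error", "stats": {...}}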
  265. """
  266. last_assistant_text = ""
  267. final_trace: Optional[Trace] = None
  268. async for item in self.run(messages=messages, config=config):
  269. if on_event:
  270. on_event(item)
  271. if isinstance(item, Message) and item.role == "assistant":
  272. content = item.content
  273. text = ""
  274. if isinstance(content, dict):
  275. text = content.get("text", "") or ""
  276. elif isinstance(content, str):
  277. text = content
  278. if text and text.strip():
  279. last_assistant_text = text
  280. elif isinstance(item, Trace):
  281. final_trace = item
  282. config = config or RunConfig()
  283. if not final_trace and config.trace_id and self.trace_store:
  284. final_trace = await self.trace_store.get_trace(config.trace_id)
  285. status = final_trace.status if final_trace else "unknown"
  286. error = final_trace.error_message if final_trace else None
  287. summary = last_assistant_text
  288. if not summary:
  289. status = "failed"
  290. error = error or "Agent 没有产生 assistant 文本结果"
  291. return {
  292. "status": status,
  293. "summary": summary,
  294. "trace_id": final_trace.trace_id if final_trace else config.trace_id,
  295. "error": error,
  296. "stats": {
  297. "total_messages": final_trace.total_messages if final_trace else 0,
  298. "total_tokens": final_trace.total_tokens if final_trace else 0,
  299. "total_cost": final_trace.total_cost if final_trace else 0.0,
  300. },
  301. }
  302. async def stop(self, trace_id: str) -> bool:
  303. """
  304. 停止运行中的 Trace
  305. 设置取消信号,agent loop 在下一个 LLM 调用前检查并退出。
  306. Trace 状态置为 "stopped"。
  307. Returns:
  308. True 如果成功发送停止信号,False 如果该 trace 不在运行中
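示例(示意):在另一个协程中调用 `await runner.stop(trace_id)` 置位取消事件,
agent loop 会在下一次 LLM 调用前检测到并把 Trace 置为 "stopped"。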
  309. """
  310. cancel_event = self._cancel_events.get(trace_id)
  311. if cancel_event is None:
  312. return False
  313. cancel_event.set()
  314. return True
  315. # ===== 单次调用(保留)=====
  316. async def call(
  317. self,
  318. messages: List[Dict],
  319. model: str = "gpt-4o",
  320. tools: Optional[List[str]] = None,
  321. uid: Optional[str] = None,
  322. trace: bool = True,
  323. **kwargs
  324. ) -> CallResult:
  325. """
  326. 单次 LLM 调用(无 Agent Loop)
  327. """
  328. if not self.llm_call:
  329. raise ValueError("llm_call function not provided")
  330. trace_id = None
  331. message_id = None
  332. tool_schemas = self._get_tool_schemas(tools)
  333. if trace and self.trace_store:
  334. trace_obj = Trace.create(mode="call", uid=uid, model=model, tools=tool_schemas, llm_params=kwargs)
  335. trace_id = await self.trace_store.create_trace(trace_obj)
  336. result = await self.llm_call(messages=messages, model=model, tools=tool_schemas, **kwargs)
  337. if trace and self.trace_store and trace_id:
  338. msg = Message.create(
  339. trace_id=trace_id, role="assistant", sequence=1, goal_id=None,
  340. content={"text": result.get("content", ""), "tool_calls": result.get("tool_calls")},
  341. prompt_tokens=result.get("prompt_tokens", 0),
  342. completion_tokens=result.get("completion_tokens", 0),
  343. finish_reason=result.get("finish_reason"),
  344. cost=result.get("cost", 0),
  345. )
  346. message_id = await self.trace_store.add_message(msg)
  347. await self.trace_store.update_trace(trace_id, status="completed", completed_at=datetime.now())
  348. return CallResult(
  349. reply=result.get("content", ""),
  350. tool_calls=result.get("tool_calls"),
  351. trace_id=trace_id,
  352. step_id=message_id,
  353. tokens={"prompt": result.get("prompt_tokens", 0), "completion": result.get("completion_tokens", 0)},
  354. cost=result.get("cost", 0)
  355. )
  356. # ===== Phase 1: PREPARE TRACE =====
  357. async def _prepare_trace(
  358. self,
  359. messages: List[Dict],
  360. config: RunConfig,
  361. ) -> Tuple[Trace, Optional[GoalTree], int]:
  362. """
  363. 准备 Trace:创建新的或加载已有的
  364. Returns:
  365. (trace, goal_tree, next_sequence)
  366. """
  367. if config.trace_id:
  368. return await self._prepare_existing_trace(config)
  369. else:
  370. return await self._prepare_new_trace(messages, config)
  371. async def _prepare_new_trace(
  372. self,
  373. messages: List[Dict],
  374. config: RunConfig,
  375. ) -> Tuple[Trace, Optional[GoalTree], int]:
  376. """创建新 Trace"""
  377. trace_id = str(uuid.uuid4())
  378. # 生成任务名称
  379. task_name = config.name or await self._generate_task_name(messages)
  380. # 准备工具 Schema
  381. tool_schemas = self._get_tool_schemas(config.tools)
  382. trace_obj = Trace(
  383. trace_id=trace_id,
  384. mode="agent",
  385. task=task_name,
  386. agent_type=config.agent_type,
  387. parent_trace_id=config.parent_trace_id,
  388. parent_goal_id=config.parent_goal_id,
  389. uid=config.uid,
  390. model=config.model,
  391. tools=tool_schemas,
  392. llm_params={"temperature": config.temperature, **config.extra_llm_params},
  393. status="running",
  394. )
  395. goal_tree = self.goal_tree or GoalTree(mission=task_name)
  396. if self.trace_store:
  397. await self.trace_store.create_trace(trace_obj)
  398. await self.trace_store.update_goal_tree(trace_id, goal_tree)
  399. return trace_obj, goal_tree, 1
  400. async def _prepare_existing_trace(
  401. self,
  402. config: RunConfig,
  403. ) -> Tuple[Trace, Optional[GoalTree], int]:
  404. """加载已有 Trace(续跑或回溯)"""
  405. if not self.trace_store:
  406. raise ValueError("trace_store required for continue/rewind")
  407. trace_obj = await self.trace_store.get_trace(config.trace_id)
  408. if not trace_obj:
  409. raise ValueError(f"Trace not found: {config.trace_id}")
  410. goal_tree = await self.trace_store.get_goal_tree(config.trace_id)
  411. if goal_tree is None:
  412. # 防御性兜底:trace 存在但 goal.json 丢失时,创建空树
  413. goal_tree = GoalTree(mission=trace_obj.task or "Agent task")
  414. await self.trace_store.update_goal_tree(config.trace_id, goal_tree)
  415. # 自动判断行为:after_sequence 为 None 或 == head → 续跑;< head → 回溯
  416. after_seq = config.after_sequence
  417. # 如果 after_seq > head_sequence,说明 generator 被强制关闭时 store 的
  418. # head_sequence 未来得及更新(仍停在 Phase 2 写入的初始值)。
  419. # 用 last_sequence 修正 head_sequence,确保续跑时能看到完整历史。
  420. if after_seq is not None and after_seq > trace_obj.head_sequence:
  421. trace_obj.head_sequence = trace_obj.last_sequence
  422. await self.trace_store.update_trace(
  423. config.trace_id, head_sequence=trace_obj.head_sequence
  424. )
  425. if after_seq is not None and after_seq < trace_obj.head_sequence:
  426. # 回溯模式
  427. sequence = await self._rewind(config.trace_id, after_seq, goal_tree)
  428. else:
  429. # 续跑模式:从 last_sequence + 1 开始
  430. sequence = trace_obj.last_sequence + 1
  431. # 状态置为 running
  432. await self.trace_store.update_trace(
  433. config.trace_id,
  434. status="running",
  435. completed_at=None,
  436. )
  437. trace_obj.status = "running"
  438. return trace_obj, goal_tree, sequence
  439. # ===== Phase 2: BUILD HISTORY =====
  440. async def _get_embedding(self, text: str) -> List[float]:
  441. """
  442. 获取文本的嵌入向量(Embedding)
  443. Args:
  444. text: 需要向量化的文本
  445. Returns:
  446. List[float]: 嵌入向量
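embedding_call 的参考实现(仅为示意,假设使用官方 openai 异步 SDK;
任何返回 List[float] 的异步可调用对象均可):

    from openai import AsyncOpenAI
    client = AsyncOpenAI()

    async def my_embedding_call(text: str) -> list[float]:
        resp = await client.embeddings.create(
            model="text-embedding-3-small", input=text
        )
        return resp.data[0].embedding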
  447. """
  448. if not text or not text.strip():
  449. return []
  450. # 优先使用注入的 embedding_call
  451. if self.embedding_call:
  452. try:
  453. return await self.embedding_call(text)
  454. except Exception as e:
  455. logger.error(f"Error in embedding_call: {e}")
  456. raise
457. # 不做隐式兜底:要求基础设施层显式注入 embedding_call,以保证分层清晰
  460. raise ValueError("embedding_call function not provided to AgentRunner")
  461. async def _build_history(
  462. self,
  463. trace_id: str,
  464. new_messages: List[Dict],
  465. goal_tree: Optional[GoalTree],
  466. config: RunConfig,
  467. sequence: int,
468. ) -> Tuple[List[Dict], int, List[Message], int]:
469. """
470. 构建完整的 LLM 消息历史
471. 1. 从 head_sequence 沿 parent chain 加载主路径消息(续跑/回溯场景)
472. 2. 构建/注入 skills 到 system prompt
473. 3. 追加 input messages(设置 parent_sequence 链接到当前 head)
474. 4. 更新 trace 的 head_sequence
475. Returns:
476. (history, next_sequence, created_messages, head_sequence)
477. created_messages: 本次新创建并持久化的 Message 列表,供 run() yield 给调用方
478. head_sequence: 当前主路径头节点的 sequence
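parent_sequence 链接示意(序号仅为举例):

    seq 1: system,    parent_sequence=None
    seq 2: user,      parent_sequence=1
    seq 3: assistant, parent_sequence=2   ← head_sequence = 3
    之后若回溯到 after_sequence=1,新消息将以 parent_sequence=1 追加,
    seq 2-3 仍保留在 store 中,只是脱离主路径。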
  479. """
  480. history: List[Dict] = []
  481. created_messages: List[Message] = []
  482. head_seq: Optional[int] = None # 当前主路径的头节点 sequence
  483. # 1. 加载已有 messages(通过主路径遍历)
  484. if config.trace_id and self.trace_store:
  485. trace_obj = await self.trace_store.get_trace(trace_id)
  486. if trace_obj and trace_obj.head_sequence > 0:
  487. main_path = await self.trace_store.get_main_path_messages(
  488. trace_id, trace_obj.head_sequence
  489. )
  490. # 修复 orphaned tool_calls(中断导致的 tool_call 无 tool_result)
  491. main_path, sequence = await self._heal_orphaned_tool_calls(
  492. main_path, trace_id, goal_tree, sequence,
  493. )
  494. history = [msg.to_llm_dict() for msg in main_path]
  495. if main_path:
  496. head_seq = main_path[-1].sequence
  497. # 2. 构建/注入 skills 到 system prompt
  498. has_system = any(m.get("role") == "system" for m in history)
  499. has_system_in_new = any(m.get("role") == "system" for m in new_messages)
  500. if not has_system:
  501. if has_system_in_new:
  502. # 入参消息已含 system,将 skills 注入其中(在 step 4 持久化之前)
  503. augmented = []
  504. for msg in new_messages:
  505. if msg.get("role") == "system":
  506. base = msg.get("content") or ""
  507. enriched = await self._build_system_prompt(config, base_prompt=base)
  508. augmented.append({**msg, "content": enriched or base})
  509. else:
  510. augmented.append(msg)
  511. new_messages = augmented
  512. else:
  513. # 没有 system,自动构建并插入历史
  514. system_prompt = await self._build_system_prompt(config)
  515. if system_prompt:
  516. history = [{"role": "system", "content": system_prompt}] + history
  517. if self.trace_store:
  518. system_msg = Message.create(
  519. trace_id=trace_id, role="system", sequence=sequence,
  520. goal_id=None, content=system_prompt,
  521. parent_sequence=None, # system message 是 root
  522. )
  523. await self.trace_store.add_message(system_msg)
  524. created_messages.append(system_msg)
  525. head_seq = sequence
  526. sequence += 1
  527. # 3. 追加新 messages(设置 parent_sequence 链接到当前 head)
  528. for msg_dict in new_messages:
  529. history.append(msg_dict)
  530. if self.trace_store:
  531. stored_msg = Message.from_llm_dict(
  532. msg_dict, trace_id=trace_id, sequence=sequence,
  533. goal_id=None, parent_sequence=head_seq,
  534. )
  535. await self.trace_store.add_message(stored_msg)
  536. created_messages.append(stored_msg)
  537. head_seq = sequence
  538. sequence += 1
539. # 4. 更新 trace 的 head_sequence
  540. if self.trace_store and head_seq is not None:
  541. await self.trace_store.update_trace(trace_id, head_sequence=head_seq)
  542. return history, sequence, created_messages, head_seq or 0
  543. # ===== Phase 3: AGENT LOOP =====
  544. async def _agent_loop(
  545. self,
  546. trace: Trace,
  547. history: List[Dict],
  548. goal_tree: Optional[GoalTree],
  549. config: RunConfig,
  550. sequence: int,
  551. ) -> AsyncIterator[Union[Trace, Message]]:
  552. """ReAct 循环"""
  553. trace_id = trace.trace_id
  554. tool_schemas = self._get_tool_schemas(config.tools)
  555. # 当前主路径头节点的 sequence(用于设置 parent_sequence)
  556. head_seq = trace.head_sequence
  557. # 经验检索缓存:只在 goal 切换时重新检索
  558. _last_goal_id = None
  559. _cached_exp_text = ""
  560. for iteration in range(config.max_iterations):
  561. # 检查取消信号
  562. cancel_event = self._cancel_events.get(trace_id)
  563. if cancel_event and cancel_event.is_set():
  564. logger.info(f"Trace {trace_id} stopped by user")
  565. if self.trace_store:
  566. await self.trace_store.update_trace(
  567. trace_id,
  568. status="stopped",
  569. head_sequence=head_seq,
  570. completed_at=datetime.now(),
  571. )
  572. trace_obj = await self.trace_store.get_trace(trace_id)
  573. if trace_obj:
  574. yield trace_obj
  575. return
  576. # Level 1 压缩:GoalTree 过滤(当消息超过阈值时触发)
  577. compression_config = CompressionConfig()
  578. token_count = estimate_tokens(history)
  579. max_tokens = compression_config.get_max_tokens(config.model)
  580. # 压缩评估日志
  581. progress_pct = (token_count / max_tokens * 100) if max_tokens > 0 else 0
  582. msg_count = len(history)
  583. img_count = sum(
  584. 1 for msg in history
  585. if isinstance(msg.get("content"), list)
  586. for part in msg["content"]
  587. if isinstance(part, dict) and part.get("type") in ("image", "image_url")
  588. )
  589. print(f"\n[压缩评估] 消息数: {msg_count} | 图片数: {img_count} | Token: {token_count:,} / {max_tokens:,} ({progress_pct:.1f}%)")
  590. if token_count > max_tokens:
  591. print(f"[压缩评估] ⚠️ 超过阈值,触发压缩流程")
  592. else:
  593. print(f"[压缩评估] ✅ 未超阈值,无需压缩")
  594. if token_count > max_tokens and self.trace_store and goal_tree:
  595. # 使用本地 head_seq(store 中的 head_sequence 在 loop 期间未更新,是过时的)
  596. if head_seq > 0:
  597. main_path_msgs = await self.trace_store.get_main_path_messages(
  598. trace_id, head_seq
  599. )
  600. filtered_msgs = filter_by_goal_status(main_path_msgs, goal_tree)
  601. if len(filtered_msgs) < len(main_path_msgs):
  602. filtered_tokens = estimate_tokens([msg.to_llm_dict() for msg in filtered_msgs])
  603. print(
  604. f"[Level 1 压缩] 消息: {len(main_path_msgs)} → {len(filtered_msgs)} 条 | "
  605. f"Token: {token_count:,} → ~{filtered_tokens:,}"
  606. )
  607. logger.info(
  608. "Level 1 压缩: %d -> %d 条消息 (tokens ~%d, 阈值 %d)",
  609. len(main_path_msgs), len(filtered_msgs), token_count, max_tokens,
  610. )
  611. history = [msg.to_llm_dict() for msg in filtered_msgs]
  612. else:
  613. print(
  614. f"[Level 1 压缩] 无可过滤消息 ({len(main_path_msgs)} 条全部保留, "
  615. f"completed/abandoned goals={sum(1 for g in goal_tree.goals if g.status in ('completed', 'abandoned'))})"
  616. )
  617. logger.info(
  618. "Level 1 压缩: 无可过滤消息 (%d 条全部保留, completed/abandoned goals=%d)",
  619. len(main_path_msgs),
  620. sum(1 for g in goal_tree.goals
  621. if g.status in ("completed", "abandoned")),
  622. )
  623. elif token_count > max_tokens:
  624. print("[压缩评估] ⚠️ 无法执行 Level 1 压缩(缺少 store 或 goal_tree)")
  625. logger.warning(
  626. "消息 token 数 (%d) 超过阈值 (%d),但无法执行 Level 1 压缩(缺少 store 或 goal_tree)",
  627. token_count, max_tokens,
  628. )
  629. # Level 2 压缩:LLM 总结(Level 1 后仍超阈值时触发)
  630. token_count_after = estimate_tokens(history)
  631. if token_count_after > max_tokens:
  632. progress_pct_after = (token_count_after / max_tokens * 100) if max_tokens > 0 else 0
  633. print(
  634. f"[Level 2 压缩] Level 1 后仍超阈值: {token_count_after:,} / {max_tokens:,} ({progress_pct_after:.1f}%) "
  635. f"→ 触发 LLM 总结"
  636. )
  637. logger.info(
  638. "Level 1 后 token 仍超阈值 (%d > %d),触发 Level 2 压缩",
  639. token_count_after, max_tokens,
  640. )
  641. history, head_seq, sequence = await self._compress_history(
  642. trace_id, history, goal_tree, config, sequence, head_seq,
  643. )
  644. final_tokens = estimate_tokens(history)
  645. print(f"[Level 2 压缩] 完成: Token {token_count_after:,} → {final_tokens:,}")
  646. elif token_count > max_tokens:
  647. # Level 1 压缩成功,未触发 Level 2
  648. print(f"[压缩评估] ✅ Level 1 压缩后达标: {token_count_after:,} / {max_tokens:,}")
  649. print() # 空行分隔
  650. # 构建 LLM messages(注入上下文)
  651. llm_messages = list(history)
  652. # 先对历史消息应用 Prompt Caching(在注入动态内容之前)
  653. # 这样可以确保历史消息的缓存点固定,不受动态注入影响
  654. llm_messages = self._add_cache_control(
  655. llm_messages,
  656. config.model,
  657. config.enable_prompt_caching
  658. )
  659. # 然后追加动态注入的内容(不影响已缓存的历史消息)
  660. # 周期性注入 GoalTree + Collaborators
  661. if iteration % CONTEXT_INJECTION_INTERVAL == 0:
  662. context_injection = self._build_context_injection(trace, goal_tree)
  663. if context_injection:
  664. llm_messages.append({"role": "system", "content": context_injection})
  665. # 经验检索:goal 切换时重新检索,注入为 system message
  666. current_goal_id = goal_tree.current_id if goal_tree else None
  667. if current_goal_id and current_goal_id != _last_goal_id:
  668. _last_goal_id = current_goal_id
  669. current_goal = goal_tree.find(current_goal_id)
  670. if current_goal:
  671. try:
  672. relevant_exps = await _get_structured_experiences(
  673. query_text=current_goal.description,
  674. top_k=3,
  675. context={"runner": self}
  676. )
  677. if relevant_exps:
  678. self.used_ex_ids = [exp['id'] for exp in relevant_exps]
  679. parts = [f"[{exp['id']}] {exp['content']}" for exp in relevant_exps]
  680. _cached_exp_text = "## 参考历史经验\n" + "\n\n".join(parts)
  681. logger.info(
  682. "经验检索: goal='%s', 命中 %d 条 %s",
  683. current_goal.description[:40],
  684. len(relevant_exps),
  685. self.used_ex_ids,
  686. )
  687. else:
  688. _cached_exp_text = ""
  689. except Exception as e:
  690. logger.warning("经验检索失败: %s", e)
  691. _cached_exp_text = ""
  692. # 经验注入:goal切换时注入相关历史经验
  693. if _cached_exp_text:
  694. llm_messages.append({"role": "system", "content": _cached_exp_text})
  695. # 调用 LLM
  696. result = await self.llm_call(
  697. messages=llm_messages,
  698. model=config.model,
  699. tools=tool_schemas,
  700. temperature=config.temperature,
  701. **config.extra_llm_params,
  702. )
  703. response_content = result.get("content", "")
  704. tool_calls = result.get("tool_calls")
  705. finish_reason = result.get("finish_reason")
  706. prompt_tokens = result.get("prompt_tokens", 0)
  707. completion_tokens = result.get("completion_tokens", 0)
  708. step_cost = result.get("cost", 0)
  709. cache_creation_tokens = result.get("cache_creation_tokens")
  710. cache_read_tokens = result.get("cache_read_tokens")
  711. # 按需自动创建 root goal
  712. if goal_tree and not goal_tree.goals and tool_calls:
  713. has_goal_call = any(
  714. tc.get("function", {}).get("name") == "goal"
  715. for tc in tool_calls
  716. )
  717. if not has_goal_call:
  718. mission = goal_tree.mission
  719. root_desc = mission[:200] if len(mission) > 200 else mission
  720. goal_tree.add_goals(
  721. descriptions=[root_desc],
  722. reasons=["系统自动创建:Agent 未显式创建目标"],
  723. parent_id=None
  724. )
  725. goal_tree.focus(goal_tree.goals[0].id)
  726. if self.trace_store:
  727. await self.trace_store.add_goal(trace_id, goal_tree.goals[0])
  728. await self.trace_store.update_goal_tree(trace_id, goal_tree)
  729. logger.info(f"自动创建 root goal: {goal_tree.goals[0].id}")
  730. # 获取当前 goal_id
  731. current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
  732. # 记录 assistant Message(parent_sequence 指向当前 head)
  733. assistant_msg = Message.create(
  734. trace_id=trace_id,
  735. role="assistant",
  736. sequence=sequence,
  737. goal_id=current_goal_id,
  738. parent_sequence=head_seq if head_seq > 0 else None,
  739. content={"text": response_content, "tool_calls": tool_calls},
  740. prompt_tokens=prompt_tokens,
  741. completion_tokens=completion_tokens,
  742. cache_creation_tokens=cache_creation_tokens,
  743. cache_read_tokens=cache_read_tokens,
  744. finish_reason=finish_reason,
  745. cost=step_cost,
  746. )
  747. if self.trace_store:
  748. await self.trace_store.add_message(assistant_msg)
  749. # 记录模型使用
  750. await self.trace_store.record_model_usage(
  751. trace_id=trace_id,
752. sequence=sequence,  # assistant_msg 的 sequence(此时尚未自增)
  753. role="assistant",
  754. model=config.model,
  755. prompt_tokens=prompt_tokens,
  756. completion_tokens=completion_tokens,
  757. cache_read_tokens=cache_read_tokens or 0,
  758. )
  759. yield assistant_msg
  760. head_seq = sequence
  761. sequence += 1
  762. # 处理工具调用
  763. # 截断兜底:finish_reason == "length" 说明响应被 max_tokens 截断,
  764. # tool call 参数很可能不完整,不应执行,改为提示模型分批操作
  765. if tool_calls and finish_reason == "length":
  766. logger.warning(
  767. "[Runner] 响应被 max_tokens 截断,跳过 %d 个不完整的 tool calls",
  768. len(tool_calls),
  769. )
  770. truncation_hint = (
  771. "你的响应因为 max_tokens 限制被截断,tool call 参数不完整,未执行。"
  772. "请将大内容拆分为多次小的工具调用(例如用 write_file 的 append 模式分批写入)。"
  773. )
  774. history.append({
  775. "role": "assistant",
  776. "content": response_content,
  777. "tool_calls": tool_calls,
  778. })
  779. # 为每个被截断的 tool call 返回错误结果
  780. for tc in tool_calls:
  781. history.append({
  782. "role": "tool",
  783. "tool_call_id": tc["id"],
  784. "content": truncation_hint,
  785. })
  786. continue
  787. if tool_calls and config.auto_execute_tools:
  788. history.append({
  789. "role": "assistant",
  790. "content": response_content,
  791. "tool_calls": tool_calls,
  792. })
  793. for tc in tool_calls:
  794. current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
  795. tool_name = tc["function"]["name"]
  796. tool_args = tc["function"]["arguments"]
  797. if isinstance(tool_args, str):
  798. tool_args = json.loads(tool_args) if tool_args.strip() else {}
  799. elif tool_args is None:
  800. tool_args = {}
  801. tool_result = await self.tools.execute(
  802. tool_name,
  803. tool_args,
  804. uid=config.uid or "",
  805. context={
  806. "store": self.trace_store,
  807. "trace_id": trace_id,
  808. "goal_id": current_goal_id,
  809. "runner": self,
  810. "goal_tree": goal_tree,
  811. }
  812. )
  813. # --- 支持多模态工具反馈 ---
  814. # execute() 返回 dict{"text","images","tool_usage"} 或 str
  815. # 统一为dict格式
  816. if isinstance(tool_result, str):
  817. tool_result = {"text": tool_result}
  818. tool_text = tool_result.get("text", str(tool_result))
  819. tool_images = tool_result.get("images", [])
820. tool_usage = tool_result.get("tool_usage")  # 工具上报的模型用量(如有)
  821. # 处理多模态消息
  822. if tool_images:
  823. tool_result_text = tool_text
  824. # 构建多模态消息格式
  825. tool_content_for_llm = [{"type": "text", "text": tool_text}]
  826. for img in tool_images:
  827. if img.get("type") == "base64" and img.get("data"):
  828. media_type = img.get("media_type", "image/png")
  829. tool_content_for_llm.append({
  830. "type": "image_url",
  831. "image_url": {
  832. "url": f"data:{media_type};base64,{img['data']}"
  833. }
  834. })
  835. img_count = len(tool_content_for_llm) - 1 # 减去 text 块
  836. print(f"[Runner] 多模态工具反馈: tool={tool_name}, images={img_count}, text_len={len(tool_result_text)}")
  837. else:
  838. tool_result_text = tool_text
  839. tool_content_for_llm = tool_text
  840. tool_msg = Message.create(
  841. trace_id=trace_id,
  842. role="tool",
  843. sequence=sequence,
  844. goal_id=current_goal_id,
  845. parent_sequence=head_seq,
  846. tool_call_id=tc["id"],
  847. # 存储完整内容:有图片时保留 list(含 image_url),纯文本时存字符串
  848. content={"tool_name": tool_name, "result": tool_content_for_llm},
  849. )
  850. if self.trace_store:
  851. await self.trace_store.add_message(tool_msg)
  852. # 记录工具的模型使用
  853. if tool_usage:
  854. await self.trace_store.record_model_usage(
  855. trace_id=trace_id,
  856. sequence=sequence,
  857. role="tool",
  858. tool_name=tool_name,
  859. model=tool_usage.get("model"),
  860. prompt_tokens=tool_usage.get("prompt_tokens", 0),
  861. completion_tokens=tool_usage.get("completion_tokens", 0),
  862. cache_read_tokens=tool_usage.get("cache_read_tokens", 0),
  863. )
  864. # 截图单独存为同名 PNG 文件
  865. if tool_images:
  866. import base64 as b64mod
  867. for img in tool_images:
  868. if img.get("data"):
  869. png_path = self.trace_store._get_messages_dir(trace_id) / f"{tool_msg.message_id}.png"
  870. png_path.write_bytes(b64mod.b64decode(img["data"]))
  871. print(f"[Runner] 截图已保存: {png_path.name}")
  872. break # 只存第一张
  873. yield tool_msg
  874. head_seq = sequence
  875. sequence += 1
  876. history.append({
  877. "role": "tool",
  878. "tool_call_id": tc["id"],
  879. "name": tool_name,
  880. "content": tool_content_for_llm, # 这里传入 list 即可触发模型的视觉能力
  881. })
  882. # ------------------------------------------
  883. continue # 继续循环
  884. # 无工具调用,任务完成
  885. break
  886. # 更新 head_sequence 并完成 Trace
  887. if self.trace_store:
  888. await self.trace_store.update_trace(
  889. trace_id,
  890. status="completed",
  891. head_sequence=head_seq,
  892. completed_at=datetime.now(),
  893. )
  894. trace_obj = await self.trace_store.get_trace(trace_id)
  895. if trace_obj:
  896. yield trace_obj
  897. # ===== Level 2: LLM 压缩 =====
  898. async def _compress_history(
  899. self,
  900. trace_id: str,
  901. history: List[Dict],
  902. goal_tree: Optional[GoalTree],
  903. config: RunConfig,
  904. sequence: int,
  905. head_seq: int,
  906. ) -> Tuple[List[Dict], int, int]:
  907. """
  908. Level 2 压缩:LLM 总结
  909. Step 1: 经验提取(reflect)— 纯内存 LLM 调用 + 文件追加,不影响 trace
  910. Step 2: 压缩总结 — LLM 生成 summary
  911. Step 3: 存储 summary 为新消息,parent_sequence 跳到 system msg
  912. Step 4: 重建 history
  913. Returns:
  914. (new_history, new_head_seq, next_sequence)
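压缩后消息图示意:

    system (seq 1) ← summary user 消息(新 seq,parent_sequence=1)= 新 head
    中间的全部消息仍保留在 store 中,但脱离主路径;
    下一次 LLM 调用只会看到 [system, summary] 及其之后追加的消息。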
  915. """
  916. logger.info("Level 2 压缩开始: trace=%s, 当前 history 长度=%d", trace_id, len(history))
  917. # 找到 system message 的 sequence(主路径第一条消息)
  918. system_msg_seq = None
  919. system_msg_dict = None
  920. if self.trace_store:
  921. trace_obj = await self.trace_store.get_trace(trace_id)
  922. if trace_obj and trace_obj.head_sequence > 0:
  923. main_path = await self.trace_store.get_main_path_messages(
  924. trace_id, trace_obj.head_sequence
  925. )
  926. for msg in main_path:
  927. if msg.role == "system":
  928. system_msg_seq = msg.sequence
  929. system_msg_dict = msg.to_llm_dict()
  930. break
  931. # Fallback: 从 history 中找 system message
  932. if system_msg_dict is None:
  933. for msg_dict in history:
  934. if msg_dict.get("role") == "system":
  935. system_msg_dict = msg_dict
  936. break
  937. if system_msg_dict is None:
  938. logger.warning("Level 2 压缩跳过:未找到 system message")
  939. return history, head_seq, sequence
  940. # --- Step 1: 经验提取(reflect)---
  941. try:
  942. # 1. 构造 Reflect Prompt(确保包含格式要求)
  943. # 建议在 build_reflect_prompt() 里加入:
  944. # "请使用格式:- [intent: 意图, state: 状态描述] 具体的经验内容"
  945. reflect_prompt = build_reflect_prompt()
  946. reflect_messages = list(history) + [{"role": "user", "content": reflect_prompt}]
  947. # 应用 Prompt Caching
  948. reflect_messages = self._add_cache_control(
  949. reflect_messages,
  950. config.model,
  951. config.enable_prompt_caching
  952. )
  953. reflect_result = await self.llm_call(
  954. messages=reflect_messages,
  955. model=config.model,
  956. tools=[],
  957. temperature=0.2, # 略微保持一点发散性
  958. **config.extra_llm_params,
  959. )
  960. reflection_text = reflect_result.get("content", "").strip()
  961. if reflection_text:
  962. import re as _re2
  963. import uuid as _uuid2
  964. pattern = r"-\s*\[(?P<tags>.*?)\]\s*(?P<content>.*)"
  965. matches = list(_re2.finditer(pattern, reflection_text))
  966. structured_entries = []
  967. for match in matches:
  968. tags_str = match.group("tags")
  969. content = match.group("content")
  970. intent_match = _re2.search(r"intent:\s*(.*?)(?:,|$)", tags_str, _re2.IGNORECASE)
  971. state_match = _re2.search(r"state:\s*(.*?)(?:,|$)", tags_str, _re2.IGNORECASE)
  972. intents = [i.strip() for i in intent_match.group(1).split(",")] if intent_match and intent_match.group(1) else []
  973. states = [s.strip() for s in state_match.group(1).split(",")] if state_match and state_match.group(1) else []
  974. ex_id = f"ex_{datetime.now().strftime('%m%d%H%M')}_{_uuid2.uuid4().hex[:4]}"
  975. entry = f"""---
  976. id: {ex_id}
  977. trace_id: {trace_id}
  978. tags: {{intent: {intents}, state: {states}}}
  979. metrics: {{helpful: 1, harmful: 0}}
  980. created_at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
  981. ---
  982. - {content}
  983. - 经验ID: [{ex_id}]"""
  984. structured_entries.append(entry)
  985. if structured_entries:
  986. os.makedirs(os.path.dirname(self.experiences_path), exist_ok=True)
  987. with open(self.experiences_path, "a", encoding="utf-8") as f:
  988. f.write("\n\n" + "\n\n".join(structured_entries))
  989. logger.info(f"已提取并保存 {len(structured_entries)} 条结构化经验")
  990. else:
  991. logger.warning("未能解析出符合格式的经验条目,请检查 REFLECT_PROMPT。")
  992. logger.debug(f"LLM Raw Output:\n{reflection_text}")
  993. else:
  994. logger.warning("LLM 未生成反思内容")
  995. except Exception as e:
  996. logger.error(f"Level 2 经验提取失败: {e}")
  997. # --- Step 2: 压缩总结 + 经验评估 ---
  998. compress_prompt = build_compression_prompt(goal_tree, used_ex_ids=self.used_ex_ids)
  999. compress_messages = list(history) + [{"role": "user", "content": compress_prompt}]
  1000. # 应用 Prompt Caching
  1001. compress_messages = self._add_cache_control(
  1002. compress_messages,
  1003. config.model,
  1004. config.enable_prompt_caching
  1005. )
  1006. compress_result = await self.llm_call(
  1007. messages=compress_messages,
  1008. model=config.model,
  1009. tools=[],
  1010. temperature=config.temperature,
  1011. **config.extra_llm_params,
  1012. )
  1013. raw_output = compress_result.get("content", "").strip()
  1014. if not raw_output:
  1015. logger.warning("Level 2 压缩跳过:LLM 未返回内容")
  1016. return history, head_seq, sequence
  1017. # 解析 [[EVALUATION]] 块并更新经验
  1018. if self.used_ex_ids:
  1019. try:
  1020. eval_block = ""
  1021. if "[[EVALUATION]]" in raw_output:
  1022. eval_start = raw_output.index("[[EVALUATION]]") + len("[[EVALUATION]]")
  1023. eval_end = raw_output.index("[[SUMMARY]]") if "[[SUMMARY]]" in raw_output else len(raw_output)
  1024. eval_block = raw_output[eval_start:eval_end].strip()
  1025. if eval_block:
  1026. import re as _re
  1027. update_map = {}
  1028. for line in eval_block.splitlines():
  1029. m = _re.search(r"ID:\s*(ex_\S+)\s*\|\s*Result:\s*(\w+)", line)
  1030. if m:
  1031. ex_id, result = m.group(1), m.group(2).lower()
  1032. if result in ("helpful", "harmful"):
  1033. update_map[ex_id] = {"action": result, "feedback": ""}
  1034. elif result == "mixed":
  1035. update_map[ex_id] = {"action": "helpful", "feedback": ""}
  1036. if update_map:
  1037. count = await _batch_update_experiences(update_map, context={"runner": self})
  1038. logger.info("经验评估完成,更新了 %s 条经验", count)
  1039. except Exception as e:
  1040. logger.warning("经验评估解析失败(不影响压缩): %s", e)
  1041. # 提取 [[SUMMARY]] 块
  1042. summary_text = raw_output
  1043. if "[[SUMMARY]]" in raw_output:
  1044. summary_text = raw_output[raw_output.index("[[SUMMARY]]") + len("[[SUMMARY]]"):].strip()
  1045. # 压缩完成后清空 used_ex_ids
  1046. self.used_ex_ids = []
  1047. if not summary_text:
  1048. logger.warning("Level 2 压缩跳过:LLM 未返回 summary")
  1049. return history, head_seq, sequence
  1050. # --- Step 3: 存储 summary 消息 ---
  1051. summary_with_header = (
  1052. f"## 对话历史摘要(自动压缩)\n\n{summary_text}\n\n"
  1053. "---\n请基于以上摘要和当前 GoalTree 继续执行任务。"
  1054. )
  1055. summary_msg = Message.create(
  1056. trace_id=trace_id,
  1057. role="user",
  1058. sequence=sequence,
  1059. goal_id=None,
  1060. parent_sequence=system_msg_seq, # 跳到 system msg,跳过所有中间消息
  1061. content=summary_with_header,
  1062. )
  1063. if self.trace_store:
  1064. await self.trace_store.add_message(summary_msg)
  1065. new_head_seq = sequence
  1066. sequence += 1
  1067. # --- Step 4: 重建 history ---
  1068. new_history = [system_msg_dict, summary_msg.to_llm_dict()]
  1069. # 更新 trace head_sequence
  1070. if self.trace_store:
  1071. await self.trace_store.update_trace(
  1072. trace_id,
  1073. head_sequence=new_head_seq,
  1074. )
  1075. logger.info(
  1076. "Level 2 压缩完成: 旧 history %d 条 → 新 history %d 条, summary 长度=%d",
  1077. len(history), len(new_history), len(summary_text),
  1078. )
  1079. return new_history, new_head_seq, sequence
  1080. # ===== 回溯(Rewind)=====
  1081. async def _rewind(
  1082. self,
  1083. trace_id: str,
  1084. after_sequence: int,
  1085. goal_tree: Optional[GoalTree],
  1086. ) -> int:
  1087. """
  1088. 执行回溯:快照 GoalTree,重建干净树,设置 head_sequence
  1089. 新消息的 parent_sequence 将指向 rewind 点,旧消息通过树结构自然脱离主路径。
  1090. Returns:
  1091. 下一个可用的 sequence 号
  1092. """
  1093. if not self.trace_store:
  1094. raise ValueError("trace_store required for rewind")
  1095. # 1. 加载所有 messages(用于 safe cutoff 和 max sequence)
  1096. all_messages = await self.trace_store.get_trace_messages(trace_id)
  1097. if not all_messages:
  1098. return 1
  1099. # 2. 找到安全截断点(确保不截断在 tool_call 和 tool response 之间)
  1100. cutoff = self._find_safe_cutoff(all_messages, after_sequence)
  1101. # 3. 快照并重建 GoalTree
  1102. if goal_tree:
  1103. # 获取截断点消息的 created_at 作为时间界限
  1104. cutoff_msg = None
  1105. for msg in all_messages:
  1106. if msg.sequence == cutoff:
  1107. cutoff_msg = msg
  1108. break
  1109. cutoff_time = cutoff_msg.created_at if cutoff_msg else datetime.now()
  1110. # 快照到 events(含 head_sequence 供前端感知分支切换)
  1111. await self.trace_store.append_event(trace_id, "rewind", {
  1112. "after_sequence": cutoff,
  1113. "head_sequence": cutoff,
  1114. "goal_tree_snapshot": goal_tree.to_dict(),
  1115. })
  1116. # 按时间重建干净的 GoalTree
  1117. new_tree = goal_tree.rebuild_for_rewind(cutoff_time)
  1118. await self.trace_store.update_goal_tree(trace_id, new_tree)
  1119. # 更新内存中的引用
  1120. goal_tree.goals = new_tree.goals
  1121. goal_tree.current_id = new_tree.current_id
  1122. # 4. 更新 head_sequence 到 rewind 点
  1123. await self.trace_store.update_trace(trace_id, head_sequence=cutoff)
  1124. # 5. 返回 next sequence(全局递增,不复用)
  1125. max_seq = max((m.sequence for m in all_messages), default=0)
  1126. return max_seq + 1
  1127. def _find_safe_cutoff(self, messages: List[Message], after_sequence: int) -> int:
  1128. """
  1129. 找到安全的截断点。
  1130. 如果 after_sequence 指向一条带 tool_calls 的 assistant message,
  1131. 则自动扩展到其所有对应的 tool response 之后。
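示例:若 seq 5 的 assistant 消息带两个 tool_calls,对应 tool 结果在 seq 6 和 seq 7,
则 _find_safe_cutoff(messages, 5) 返回 7,保证回溯不会把 tool_call 和 tool 结果拆开。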
  1132. """
  1133. cutoff = after_sequence
  1134. # 找到 after_sequence 对应的 message
  1135. target_msg = None
  1136. for msg in messages:
  1137. if msg.sequence == after_sequence:
  1138. target_msg = msg
  1139. break
  1140. if not target_msg:
  1141. return cutoff
  1142. # 如果是 assistant 且有 tool_calls,找到所有对应的 tool responses
  1143. if target_msg.role == "assistant":
  1144. content = target_msg.content
  1145. if isinstance(content, dict) and content.get("tool_calls"):
  1146. tool_call_ids = set()
  1147. for tc in content["tool_calls"]:
  1148. if isinstance(tc, dict) and tc.get("id"):
  1149. tool_call_ids.add(tc["id"])
  1150. # 找到这些 tool_call 对应的 tool messages
  1151. for msg in messages:
  1152. if (msg.role == "tool" and msg.tool_call_id
  1153. and msg.tool_call_id in tool_call_ids):
  1154. cutoff = max(cutoff, msg.sequence)
  1155. return cutoff
  1156. async def _heal_orphaned_tool_calls(
  1157. self,
  1158. messages: List[Message],
  1159. trace_id: str,
  1160. goal_tree: Optional[GoalTree],
  1161. sequence: int,
  1162. ) -> tuple:
  1163. """
  1164. 检测并修复消息历史中的 orphaned tool_calls。
  1165. 当 agent 被 stop/crash 中断时,可能有 assistant 的 tool_calls 没有对应的
  1166. tool results(包括多 tool_call 部分完成的情况)。直接发给 LLM 会导致 400。
  1167. 修复策略:为每个缺失的 tool_result 插入合成的"中断通知"消息,而非裁剪。
  1168. - 普通工具:简短中断提示
  1169. - agent/evaluate:包含 sub_trace_id、执行统计、continue_from 指引
  1170. 合成消息持久化到 store,确保幂等(下次续跑不再触发)。
  1171. Returns:
  1172. (healed_messages, next_sequence)
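合成 tool 消息的 content 形如(tool 名仅为示意):

    {"tool_name": "bash_command",
     "result": "⚠️ 工具 bash_command 执行被中断(进程异常退出),未获得执行结果。请根据需要重新调用。"}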
  1173. """
  1174. if not messages:
  1175. return messages, sequence
  1176. # 收集所有 tool_call IDs → (assistant_msg, tool_call_dict)
  1177. tc_map: Dict[str, tuple] = {}
  1178. result_ids: set = set()
  1179. for msg in messages:
  1180. if msg.role == "assistant":
  1181. content = msg.content
  1182. if isinstance(content, dict) and content.get("tool_calls"):
  1183. for tc in content["tool_calls"]:
  1184. tc_id = tc.get("id")
  1185. if tc_id:
  1186. tc_map[tc_id] = (msg, tc)
  1187. elif msg.role == "tool" and msg.tool_call_id:
  1188. result_ids.add(msg.tool_call_id)
  1189. orphaned_ids = [tc_id for tc_id in tc_map if tc_id not in result_ids]
  1190. if not orphaned_ids:
  1191. return messages, sequence
  1192. logger.info(
  1193. "检测到 %d 个 orphaned tool_calls,生成合成中断通知",
  1194. len(orphaned_ids),
  1195. )
  1196. healed = list(messages)
  1197. head_seq = messages[-1].sequence
  1198. for tc_id in orphaned_ids:
  1199. assistant_msg, tc = tc_map[tc_id]
  1200. tool_name = tc.get("function", {}).get("name", "unknown")
  1201. if tool_name in ("agent", "evaluate"):
  1202. result_text = self._build_agent_interrupted_result(
  1203. tc, goal_tree, assistant_msg,
  1204. )
  1205. else:
  1206. result_text = (
  1207. f"⚠️ 工具 {tool_name} 执行被中断(进程异常退出),"
  1208. "未获得执行结果。请根据需要重新调用。"
  1209. )
  1210. synthetic_msg = Message.create(
  1211. trace_id=trace_id,
  1212. role="tool",
  1213. sequence=sequence,
  1214. goal_id=assistant_msg.goal_id,
  1215. parent_sequence=head_seq,
  1216. tool_call_id=tc_id,
  1217. content={"tool_name": tool_name, "result": result_text},
  1218. )
  1219. if self.trace_store:
  1220. await self.trace_store.add_message(synthetic_msg)
  1221. healed.append(synthetic_msg)
  1222. head_seq = sequence
  1223. sequence += 1
  1224. # 更新 trace head/last sequence
  1225. if self.trace_store:
  1226. await self.trace_store.update_trace(
  1227. trace_id,
  1228. head_sequence=head_seq,
  1229. last_sequence=max(head_seq, sequence - 1),
  1230. )
  1231. return healed, sequence
  1232. def _build_agent_interrupted_result(
  1233. self,
  1234. tc: Dict,
  1235. goal_tree: Optional[GoalTree],
  1236. assistant_msg: Message,
  1237. ) -> str:
  1238. """为中断的 agent/evaluate 工具调用构建合成结果(对齐正常返回值格式)"""
  1239. args_str = tc.get("function", {}).get("arguments", "{}")
  1240. try:
  1241. args = json.loads(args_str) if isinstance(args_str, str) else args_str
  1242. except json.JSONDecodeError:
  1243. args = {}
  1244. task = args.get("task", "未知任务")
  1245. if isinstance(task, list):
  1246. task = "; ".join(task)
  1247. tool_name = tc.get("function", {}).get("name", "agent")
  1248. mode = "evaluate" if tool_name == "evaluate" else "delegate"
  1249. # 从 goal_tree 查找 sub_trace 信息
  1250. sub_trace_id = None
  1251. stats = None
  1252. if goal_tree and assistant_msg.goal_id:
  1253. goal = goal_tree.find(assistant_msg.goal_id)
  1254. if goal and goal.sub_trace_ids:
  1255. first = goal.sub_trace_ids[0]
  1256. if isinstance(first, dict):
  1257. sub_trace_id = first.get("trace_id")
  1258. elif isinstance(first, str):
  1259. sub_trace_id = first
  1260. if goal.cumulative_stats:
  1261. s = goal.cumulative_stats
  1262. if s.message_count > 0:
  1263. stats = {
  1264. "message_count": s.message_count,
  1265. "total_tokens": s.total_tokens,
  1266. "total_cost": round(s.total_cost, 4),
  1267. }
  1268. result: Dict[str, Any] = {
  1269. "mode": mode,
  1270. "status": "interrupted",
  1271. "summary": "⚠️ 子Agent执行被中断(进程异常退出)",
  1272. "task": task,
  1273. }
  1274. if sub_trace_id:
  1275. result["sub_trace_id"] = sub_trace_id
  1276. result["hint"] = (
  1277. f'使用 continue_from="{sub_trace_id}" 可继续执行,保留已有进度'
  1278. )
  1279. if stats:
  1280. result["stats"] = stats
  1281. return json.dumps(result, ensure_ascii=False, indent=2)
  1282. # ===== 上下文注入 =====
  1283. def _build_context_injection(
  1284. self,
  1285. trace: Trace,
  1286. goal_tree: Optional[GoalTree],
  1287. ) -> str:
  1288. """构建周期性注入的上下文(GoalTree + Active Collaborators + Focus 提醒)"""
  1289. parts = []
  1290. # GoalTree
  1291. if goal_tree and goal_tree.goals:
  1292. parts.append(f"## Current Plan\n\n{goal_tree.to_prompt()}")
  1293. # 检测 focus 在有子节点的父目标上:提醒模型 focus 到具体子目标
  1294. if goal_tree.current_id:
  1295. children = goal_tree.get_children(goal_tree.current_id)
  1296. pending_children = [c for c in children if c.status in ("pending", "in_progress")]
  1297. if pending_children:
  1298. child_ids = ", ".join(
  1299. goal_tree._generate_display_id(c) for c in pending_children[:3]
  1300. )
  1301. parts.append(
  1302. f"**提醒**:当前焦点在父目标上,建议用 `goal(focus=\"...\")` "
  1303. f"切换到具体子目标(如 {child_ids})再执行。"
  1304. )
  1305. # Active Collaborators
  1306. collaborators = trace.context.get("collaborators", [])
  1307. if collaborators:
  1308. lines = ["## Active Collaborators"]
  1309. for c in collaborators:
  1310. status_str = c.get("status", "unknown")
  1311. ctype = c.get("type", "agent")
  1312. summary = c.get("summary", "")
  1313. name = c.get("name", "unnamed")
  1314. lines.append(f"- {name} [{ctype}, {status_str}]: {summary}")
  1315. parts.append("\n".join(lines))
  1316. return "\n\n".join(parts)
  1317. # ===== 辅助方法 =====
  1318. def _add_cache_control(
  1319. self,
  1320. messages: List[Dict],
  1321. model: str,
  1322. enable: bool
  1323. ) -> List[Dict]:
  1324. """
  1325. 为支持的模型添加 Prompt Caching 标记
  1326. 策略:固定位置 + 延迟查找
  1327. 1. system message 添加缓存(如果足够长)
  1328. 2. 固定位置缓存点(20, 40, 60, 80),确保每个缓存点间隔 >= 1024 tokens
  1329. 3. 最多使用 4 个缓存点(含 system)
  1330. Args:
  1331. messages: 原始消息列表
  1332. model: 模型名称
  1333. enable: 是否启用缓存
  1334. Returns:
  1335. 添加了 cache_control 的消息列表(深拷贝)
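转换示意:

    # 转换前
    {"role": "system", "content": "<长 prompt>"}
    # 转换后(字符串 content 被包装成带 cache_control 的 text block)
    {"role": "system", "content": [
        {"type": "text", "text": "<长 prompt>", "cache_control": {"type": "ephemeral"}}
    ]}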
  1336. """
  1337. if not enable:
  1338. return messages
  1339. # 只对 Claude 模型启用
  1340. if "claude" not in model.lower():
  1341. return messages
  1342. # 深拷贝避免修改原始数据
  1343. import copy
  1344. messages = copy.deepcopy(messages)
  1345. # 策略 1: 为 system message 添加缓存
  1346. system_cached = False
  1347. for msg in messages:
  1348. if msg.get("role") == "system":
  1349. content = msg.get("content", "")
  1350. if isinstance(content, str) and len(content) > 1000:
  1351. msg["content"] = [{
  1352. "type": "text",
  1353. "text": content,
  1354. "cache_control": {"type": "ephemeral"}
  1355. }]
  1356. system_cached = True
  1357. logger.debug(f"[Cache] 为 system message 添加缓存标记 (len={len(content)})")
  1358. break
  1359. # 策略 2: 固定位置缓存点
  1360. CACHE_INTERVAL = 20
  1361. MAX_POINTS = 3 if system_cached else 4
  1362. MIN_TOKENS = 1024
  1363. AVG_TOKENS_PER_MSG = 70
  1364. total_msgs = len(messages)
  1365. if total_msgs == 0:
  1366. return messages
  1367. cache_positions = []
  1368. last_cache_pos = 0
  1369. for i in range(1, MAX_POINTS + 1):
  1370. target_pos = i * CACHE_INTERVAL - 1 # 19, 39, 59, 79
  1371. if target_pos >= total_msgs:
  1372. break
  1373. # 从目标位置开始查找合适的 user/assistant 消息
  1374. for j in range(target_pos, total_msgs):
  1375. msg = messages[j]
  1376. if msg.get("role") not in ("user", "assistant"):
  1377. continue
  1378. content = msg.get("content", "")
  1379. if not content:
  1380. continue
  1381. # 检查 content 是否非空
  1382. is_valid = False
  1383. if isinstance(content, str):
  1384. is_valid = len(content) > 0
  1385. elif isinstance(content, list):
  1386. is_valid = any(
  1387. isinstance(block, dict) and
  1388. block.get("type") == "text" and
  1389. len(block.get("text", "")) > 0
  1390. for block in content
  1391. )
  1392. if not is_valid:
  1393. continue
  1394. # 检查 token 距离
  1395. msg_count = j - last_cache_pos
  1396. estimated_tokens = msg_count * AVG_TOKENS_PER_MSG
  1397. if estimated_tokens >= MIN_TOKENS:
  1398. cache_positions.append(j)
  1399. last_cache_pos = j
  1400. logger.debug(f"[Cache] 在位置 {j} 添加缓存点 (估算 {estimated_tokens} tokens)")
  1401. break
  1402. # 应用缓存标记
  1403. for idx in cache_positions:
  1404. msg = messages[idx]
  1405. content = msg.get("content", "")
  1406. if isinstance(content, str):
  1407. msg["content"] = [{
  1408. "type": "text",
  1409. "text": content,
  1410. "cache_control": {"type": "ephemeral"}
  1411. }]
  1412. logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
  1413. elif isinstance(content, list):
  1414. # 在最后一个 text block 添加 cache_control
  1415. for block in reversed(content):
  1416. if isinstance(block, dict) and block.get("type") == "text":
  1417. block["cache_control"] = {"type": "ephemeral"}
  1418. logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
  1419. break
  1420. logger.debug(
  1421. f"[Cache] 总消息: {total_msgs}, "
  1422. f"缓存点: {len(cache_positions)} at {cache_positions}"
  1423. )
  1424. return messages
  1425. def _get_tool_schemas(self, tools: Optional[List[str]]) -> List[Dict]:
  1426. """
  1427. 获取工具 Schema
  1428. - tools=None: 使用 registry 中全部已注册工具(含内置 + 外部注册的)
  1429. - tools=["a", "b"]: 在 BUILTIN_TOOLS 基础上追加指定工具
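示例:_get_tool_schemas(None) 返回全部已注册工具的 schema;
_get_tool_schemas(["my_custom_tool"]) 返回 BUILTIN_TOOLS 加上
"my_custom_tool"(此处为调用方额外注册工具的示意名称)。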
  1430. """
  1431. if tools is None:
  1432. # 全部已注册工具
  1433. tool_names = self.tools.get_tool_names()
  1434. else:
  1435. # BUILTIN_TOOLS + 显式指定的额外工具
  1436. tool_names = BUILTIN_TOOLS.copy()
  1437. for t in tools:
  1438. if t not in tool_names:
  1439. tool_names.append(t)
  1440. return self.tools.get_schemas(tool_names)
  1441. # 默认 system prompt 前缀(当 config.system_prompt 和前端都未提供 system message 时使用)
  1442. DEFAULT_SYSTEM_PREFIX = "你是最顶尖的AI助手,可以拆分并调用工具逐步解决复杂问题。"
  1443. async def _build_system_prompt(self, config: RunConfig, base_prompt: Optional[str] = None) -> Optional[str]:
  1444. """构建 system prompt(注入 skills)
  1445. 优先级:
  1446. 1. config.skills 显式指定 → 按名称过滤
  1447. 2. config.skills 为 None → 查 preset 的默认 skills 列表
  1448. 3. preset 也无 skills(None)→ 加载全部(向后兼容)
  1449. Args:
  1450. base_prompt: 已有 system 内容(来自消息或 config.system_prompt),
  1451. None 时使用 config.system_prompt
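示例(示意,skill 名称为假设):RunConfig(agent_type="default", skills=["coding", "browsing"])
只会从 skills_dir 加载名为 "coding" 和 "browsing" 的 skill,
并以 "## Skills" 小节追加到 system prompt 末尾。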
  1452. """
  1453. from agent.core.presets import AGENT_PRESETS
  1454. system_prompt = base_prompt if base_prompt is not None else config.system_prompt
  1455. # 确定要加载哪些 skills
  1456. skills_filter: Optional[List[str]] = config.skills
  1457. if skills_filter is None:
  1458. preset = AGENT_PRESETS.get(config.agent_type)
  1459. if preset is not None:
  1460. skills_filter = preset.skills # 可能仍为 None(加载全部)
  1461. # 加载并过滤
  1462. all_skills = load_skills_from_dir(self.skills_dir)
  1463. if skills_filter is not None:
  1464. skills = [s for s in all_skills if s.name in skills_filter]
  1465. else:
  1466. skills = all_skills
  1467. skills_text = self._format_skills(skills) if skills else ""
  1468. if system_prompt:
  1469. if skills_text:
  1470. system_prompt += f"\n\n## Skills\n{skills_text}"
  1471. else:
  1472. system_prompt = self.DEFAULT_SYSTEM_PREFIX
  1473. if skills_text:
  1474. system_prompt += f"\n\n## Skills\n{skills_text}"
  1475. return system_prompt
  1476. async def _generate_task_name(self, messages: List[Dict]) -> str:
  1477. """生成任务名称:优先使用 utility_llm,fallback 到文本截取"""
  1478. # 提取 messages 中的文本内容
  1479. text_parts = []
  1480. for msg in messages:
  1481. content = msg.get("content", "")
  1482. if isinstance(content, str):
  1483. text_parts.append(content)
  1484. elif isinstance(content, list):
  1485. for part in content:
  1486. if isinstance(part, dict) and part.get("type") == "text":
  1487. text_parts.append(part.get("text", ""))
  1488. raw_text = " ".join(text_parts).strip()
  1489. if not raw_text:
  1490. return "未命名任务"
  1491. # 尝试使用 utility_llm 生成标题
  1492. if self.utility_llm_call:
  1493. try:
  1494. result = await self.utility_llm_call(
  1495. messages=[
  1496. {"role": "system", "content": "用中文为以下任务生成一个简短标题(10-30字),只输出标题本身:"},
  1497. {"role": "user", "content": raw_text[:2000]},
  1498. ],
  1499. model="gpt-4o-mini", # 使用便宜模型
  1500. )
  1501. title = result.get("content", "").strip()
  1502. if title and len(title) < 100:
  1503. return title
  1504. except Exception:
  1505. pass
  1506. # Fallback: 截取前 50 字符
  1507. return raw_text[:50] + ("..." if len(raw_text) > 50 else "")
  1508. def _format_skills(self, skills: List[Skill]) -> str:
  1509. if not skills:
  1510. return ""
  1511. return "\n\n".join(s.to_prompt_text() for s in skills)
  1512. def _load_experiences(self) -> str:
  1513. """从文件加载经验(./.cache/experiences.md)"""
  1514. if not self.experiences_path:
  1515. return ""
  1516. try:
  1517. if os.path.exists(self.experiences_path):
  1518. with open(self.experiences_path, "r", encoding="utf-8") as f:
  1519. return f.read().strip()
  1520. except Exception as e:
  1521. logger.warning(f"Failed to load experiences from {self.experiences_path}: {e}")
  1522. return ""