runner.py
  1. """
  2. Agent Runner - Agent 执行引擎
  3. 核心职责:
  4. 1. 执行 Agent 任务(循环调用 LLM + 工具)
  5. 2. 记录执行轨迹(Trace + Messages + GoalTree)
  6. 3. 加载和注入技能(Skill)
  7. 4. 管理执行计划(GoalTree)
  8. 5. 支持续跑(continue)和回溯重跑(rewind)
  9. 参数分层:
  10. - Infrastructure: AgentRunner 构造时设置(trace_store, llm_call 等)
  11. - RunConfig: 每次 run 时指定(model, trace_id, after_sequence 等)
  12. - Messages: OpenAI SDK 格式的任务消息
  13. """
import asyncio
import json
import logging
import os
import uuid
from dataclasses import dataclass, field
from datetime import datetime
from typing import AsyncIterator, Optional, Dict, Any, List, Callable, Literal, Tuple, Union

from agent.trace.models import Trace, Message
from agent.trace.protocols import TraceStore
from agent.trace.goal_models import GoalTree
from agent.trace.compaction import (
    CompressionConfig,
    compress_completed_goals,
    estimate_tokens,
    needs_level2_compression,
    build_compression_prompt,
)
from agent.skill.models import Skill
from agent.skill.skill_loader import load_skills_from_dir
from agent.tools import ToolRegistry, get_tool_registry
from agent.tools.builtin.knowledge import KnowledgeConfig
from agent.core.memory import MemoryConfig
from agent.core.prompts import (
    DEFAULT_SYSTEM_PREFIX,
    TRUNCATION_HINT,
    TOOL_INTERRUPTED_MESSAGE,
    AGENT_INTERRUPTED_SUMMARY,
    AGENT_CONTINUE_HINT_TEMPLATE,
    TASK_NAME_GENERATION_SYSTEM_PROMPT,
    TASK_NAME_FALLBACK,
    SUMMARY_HEADER_TEMPLATE,
    build_summary_header,
    build_tool_interrupted_message,
    build_agent_continue_hint,
)

logger = logging.getLogger(__name__)

@dataclass
class ContextUsage:
    """Context usage snapshot."""
    trace_id: str
    message_count: int
    token_count: int
    max_tokens: int
    usage_percent: float
    image_count: int = 0

@dataclass
class SideBranchContext:
    """Side-branch context (compression / reflection / knowledge evaluation)."""
    type: Literal["compression", "reflection", "knowledge_eval"]
    branch_id: str
    start_head_seq: int        # head_seq at the side branch's starting point
    start_sequence: int        # sequence of the side branch's first message
    start_history_length: int  # history length at the side branch's starting point
    start_iteration: int       # iteration at which the side branch started
    max_turns: int = 5         # maximum number of turns

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a dict (for persistence and for passing to tools)."""
        return {
            "type": self.type,
            "branch_id": self.branch_id,
            "start_head_seq": self.start_head_seq,
            "start_sequence": self.start_sequence,
            "start_iteration": self.start_iteration,
            "max_turns": self.max_turns,
            "is_side_branch": True,
            "started_at": datetime.now().isoformat(),
        }

# ===== Run configuration =====

@dataclass
class RunConfig:
    """
    Run parameters — control how the agent executes.

    Split into model-level parameters (decided by the upstream agent or the user)
    and framework-level parameters (injected by the system).
    """
    # --- Model-level parameters ---
    model: str = "gpt-4o"
    temperature: float = 0.3
    max_iterations: int = 200
    tools: Optional[List[str]] = None  # None = filter by tool_groups; explicit list = exact selection
    tool_groups: Optional[List[str]] = field(default_factory=lambda: ["core"])  # tool-group whitelist; "core" only by default, projects append as needed
    exclude_tools: List[str] = field(default_factory=list)  # tool names to drop from the tools / tool_groups result (e.g. remote agents disable agent/evaluate)
    side_branch_max_turns: int = 5  # maximum side-branch turns (compression/reflection)
    goal_compression: Literal["none", "on_complete", "on_overflow"] = "on_overflow"  # goal compression mode

    # --- Forced side branch (manual API trigger or the automatic compression flow) ---
    # The list acts as a side-branch queue; after each side branch finishes, pop(0) takes the next one.
    force_side_branch: Optional[List[Literal["compression", "reflection", "knowledge_eval"]]] = None

    # --- Framework-level parameters ---
    agent_type: str = "default"
    uid: Optional[str] = None
    system_prompt: Optional[str] = None  # None = build automatically from skills
    skills: Optional[List[str]] = None  # skill names to inject into the system prompt; None = decided by preset
    enable_memory: bool = True
    auto_execute_tools: bool = True
    name: Optional[str] = None  # display name (auto-generated by utility_llm if empty)
    enable_prompt_caching: bool = True  # enable Anthropic Prompt Caching (only effective for Claude models)
    parallel_tool_execution: bool = False  # run tool calls concurrently (use with care; callers must ensure no resource conflicts)

    # --- Trace control ---
    trace_id: Optional[str] = None  # None = create a new trace
    parent_trace_id: Optional[str] = None  # for sub-agents only
    parent_goal_id: Optional[str] = None

    # --- Continuation control ---
    after_sequence: Optional[int] = None  # resume after this message sequence

    # --- Extra LLM parameters (passed to llm_call as **kwargs) ---
    extra_llm_params: Dict[str, Any] = field(default_factory=dict)

    # --- Custom metadata context ---
    context: Dict[str, Any] = field(default_factory=dict)

    # --- Research flow control ---
    enable_research_flow: bool = True  # enable the automatic research flow (knowledge retrieval → experience retrieval → research → plan)

    # --- Knowledge management configuration ---
    knowledge: KnowledgeConfig = field(default_factory=KnowledgeConfig)

    # --- Memory configuration (see agent/docs/memory.md) ---
    # None = default agent (no long-term memory); set a MemoryConfig to make this a memory-bearing agent
    memory: Optional["MemoryConfig"] = None


# The hard-coded BUILTIN_TOOLS list was removed (2026-04).
# Tool availability is now controlled by @tool(groups=[...]) declarations
# plus RunConfig.tool_groups filtering.
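# Usage sketch of the parameter layering described in the module docstring.
# `sqlite_store` and `my_llm_call` are illustrative stand-ins, not names defined
# in this module. Infrastructure goes into the AgentRunner constructor once;
# a RunConfig is built fresh for each run:
#
#   runner = AgentRunner(trace_store=sqlite_store, llm_call=my_llm_call)
#   cfg = RunConfig(
#       model="gpt-4o",
#       tool_groups=["core", "web"],   # whitelist; "web" assumed to be project-defined
#       exclude_tools=["evaluate"],    # drop individual tools from the result
#       max_iterations=50,
#   )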

@dataclass
class CallResult:
    """Result of a single call."""
    reply: str
    tool_calls: Optional[List[Dict]] = None
    trace_id: Optional[str] = None
    step_id: Optional[str] = None
    tokens: Optional[Dict[str, int]] = None
    cost: float = 0.0


# ===== Execution engine =====

CONTEXT_INJECTION_INTERVAL = 5  # inject GoalTree + Collaborators + IM notifications every N turns

class AgentRunner:
    """
    Agent execution engine.

    Supports three run modes (distinguished via RunConfig):
    1. New:      trace_id=None
    2. Continue: trace_id=<existing ID>, after_sequence=None or == head
    3. Rewind:   trace_id=<existing ID>, after_sequence=N (N < head_sequence)
    """

    def __init__(
        self,
        trace_store: Optional[TraceStore] = None,
        tool_registry: Optional[ToolRegistry] = None,
        llm_call: Optional[Callable] = None,
        utility_llm_call: Optional[Callable] = None,
        skills_dir: Optional[str] = None,
        goal_tree: Optional[GoalTree] = None,
        debug: bool = False,
        logger_name: Optional[str] = None,
    ):
        """
        Initialize the AgentRunner.

        Args:
            trace_store: trace storage
            tool_registry: tool registry (defaults to the global registry)
            llm_call: main LLM call function
            utility_llm_call: lightweight LLM (for generating task titles etc.), optional
            skills_dir: path to the skills directory
            goal_tree: initial GoalTree (optional)
            debug: reserved parameter (deprecated)
            logger_name: custom logger name (e.g. "agents.knowledge_manager"); defaults to the module logger
        """
        self.trace_store = trace_store
        self.tools = tool_registry or get_tool_registry()
        self.llm_call = llm_call
        self.utility_llm_call = utility_llm_call
        self.skills_dir = skills_dir
        self.goal_tree = goal_tree
        self.debug = debug
        self.log = logging.getLogger(logger_name) if logger_name else logger
        self.stdin_check: Optional[Callable] = None  # set externally; checks stdin while sub-agents run
        self._cancel_events: Dict[str, asyncio.Event] = {}  # trace_id → cancel event
        # Knowledge-save tracking (independent per trace)
        self._saved_knowledge_ids: Dict[str, List[str]] = {}  # trace_id → [knowledge_ids]
        # Context usage tracking
        self._context_warned: Dict[str, set] = {}  # trace_id → {30, 50, 80} thresholds already warned about
        self._context_usage: Dict[str, ContextUsage] = {}  # trace_id → current usage snapshot
        # Image optimization cache (avoid reprocessing)
        # key: hash of the image content, value: {"downscaled": ..., "description": ...}
        self._image_opt_cache: Dict[str, Dict[str, Any]] = {}
        # MemoryConfig of the current run (set by run() from RunConfig.memory).
        # The dream tool reads this field via context.runner to decide whether the agent is memory-bearing.
        self._current_memory_config: Optional[MemoryConfig] = None
    # ===== Core public methods =====

    def get_context_usage(self, trace_id: str) -> Optional[ContextUsage]:
        """Get the context usage of the given trace."""
        return self._context_usage.get(trace_id)

    async def dream(
        self,
        memory_config: MemoryConfig,
        trace_filter: Optional[Callable[["Trace"], bool]] = None,
        reflect_model: str = "gpt-4o-mini",
        dream_model: str = "gpt-4o",
    ) -> "DreamReport":
        """Run dream (consolidate long-term memory) — the external scheduling entry point.

        When the agent triggers this itself it goes through the dream tool;
        external schedulers (timers, CLI) use this method.

        Args:
            memory_config: memory configuration
            trace_filter: optional trace filter (by agent_type/owner etc.)
            reflect_model: per-trace reflection model
            dream_model: cross-trace consolidation model
        """
        from agent.core.dream import run_dream
        if not self.trace_store or not self.llm_call:
            raise RuntimeError("dream requires both trace_store and llm_call to be configured")
        return await run_dream(
            store=self.trace_store,
            llm_call=self.llm_call,
            memory_config=memory_config,
            trace_filter=trace_filter,
            reflect_model=reflect_model,
            dream_model=dream_model,
        )
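    # External-scheduler sketch (e.g. a nightly job). `store`, `call_llm`, and the
    # default MemoryConfig() construction are illustrative assumptions, not values
    # defined in this module:
    #
    #   runner = AgentRunner(trace_store=store, llm_call=call_llm)
    #   report = await runner.dream(
    #       memory_config=MemoryConfig(),
    #       trace_filter=lambda t: t.agent_type == "default",
    #   )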
    async def run(
        self,
        messages: List[Dict],
        config: Optional[RunConfig] = None,
        inject_skills: Optional[List[str]] = None,
        skill_recency_threshold: int = 10,
    ) -> AsyncIterator[Union[Trace, Message]]:
        """
        Agent-mode execution (the core method).

        Args:
            messages: input messages in OpenAI SDK format
                New:      the initial task messages [{"role": "user", "content": "..."}]
                Continue: the appended new messages
                Rewind:   the messages appended after the insertion point
            config: run configuration
            inject_skills: skill names to inject for this call
            skill_recency_threshold: skip re-injection if the skill already appears within the last N messages

        Yields:
            Union[Trace, Message]: Trace objects (state changes) or Message objects (execution progress)
        """
        if not self.llm_call:
            raise ValueError("llm_call function not provided")
        config = config or RunConfig()
        trace = None
        # Memory-mode switch (read by the dream tool)
        self._current_memory_config = config.memory
        try:
            # Phase 1: PREPARE TRACE
            trace, goal_tree, sequence = await self._prepare_trace(messages, config)
            # Register the cancel event
            self._cancel_events[trace.trace_id] = asyncio.Event()
            yield trace
            # Check for an unfinished side branch (the user appended messages mid-branch)
            side_branch_ctx_for_build: Optional[SideBranchContext] = None
            if trace.context.get("active_side_branch") and messages:
                side_branch_data = trace.context["active_side_branch"]
                # Create a side-branch context (marks the user-appended messages)
                side_branch_ctx_for_build = SideBranchContext(
                    type=side_branch_data["type"],
                    branch_id=side_branch_data["branch_id"],
                    start_head_seq=side_branch_data["start_head_seq"],
                    start_sequence=side_branch_data["start_sequence"],
                    start_history_length=0,
                    start_iteration=side_branch_data.get("start_iteration", 0),
                    max_turns=side_branch_data.get("max_turns", config.side_branch_max_turns),
                )
            # Phase 2: BUILD HISTORY
            history, sequence, created_messages, head_seq = await self._build_history(
                trace.trace_id, messages, goal_tree, config, sequence, side_branch_ctx_for_build
            )
            # Update the trace's head_sequence in memory
            trace.head_sequence = head_seq
            for msg in created_messages:
                yield msg
            # Phase 3: AGENT LOOP
            async for event in self._agent_loop(
                trace, history, goal_tree, config, sequence,
                inject_skills=inject_skills,
                skill_recency_threshold=skill_recency_threshold,
            ):
                yield event
        except Exception as e:
            self.log.error(f"Agent run failed: {e}")
            tid = config.trace_id or (trace.trace_id if trace else None)
            if self.trace_store and tid:
                # Read the current last_sequence as head_sequence so a later
                # continuation can load the full history.
                current = await self.trace_store.get_trace(tid)
                head_seq = current.last_sequence if current else None
                await self.trace_store.update_trace(
                    tid,
                    status="failed",
                    head_sequence=head_seq,
                    error_message=str(e),
                    completed_at=datetime.now()
                )
                trace_obj = await self.trace_store.get_trace(tid)
                if trace_obj:
                    yield trace_obj
            raise
        finally:
            # Clean up the cancel event
            if trace:
                self._cancel_events.pop(trace.trace_id, None)
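    # Consumption sketch for run(): callers typically branch on the event type,
    # since the stream interleaves Trace snapshots and Message steps. A minimal,
    # hypothetical consumer (assumes `runner` and `cfg` exist as above):
    #
    #   async for event in runner.run([{"role": "user", "content": "task"}], cfg):
    #       if isinstance(event, Trace):
    #           print("status:", event.status)
    #       elif isinstance(event, Message):
    #           print("step:", event.role, event.sequence)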
    async def run_result(
        self,
        messages: List[Dict],
        config: Optional[RunConfig] = None,
        on_event: Optional[Callable] = None,
        inject_skills: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Result mode — consume run() and return a structured result.

        Used mainly inside the agent/evaluate tool.

        Args:
            on_event: optional callback fired for each Trace/Message event, for live output of a sub-agent's progress.
            inject_skills: skill names to inject for this call (passed through to run()).
        """
        last_assistant_text = ""
        final_trace: Optional[Trace] = None
        async for item in self.run(messages=messages, config=config, inject_skills=inject_skills):
            if on_event:
                on_event(item)
            if isinstance(item, Message) and item.role == "assistant":
                content = item.content
                text = ""
                if isinstance(content, dict):
                    text = content.get("text", "") or ""
                elif isinstance(content, str):
                    text = content
                if text and text.strip():
                    last_assistant_text = text
            elif isinstance(item, Trace):
                final_trace = item
        config = config or RunConfig()
        if not final_trace and config.trace_id and self.trace_store:
            final_trace = await self.trace_store.get_trace(config.trace_id)
        status = final_trace.status if final_trace else "unknown"
        error = final_trace.error_message if final_trace else None
        summary = last_assistant_text
        if not summary:
            status = "failed"
            error = error or "Agent produced no assistant text output"
        # Collect the saved knowledge IDs
        trace_id = final_trace.trace_id if final_trace else config.trace_id
        saved_knowledge_ids = self._saved_knowledge_ids.get(trace_id, [])
        return {
            "status": status,
            "summary": summary,
            "trace_id": trace_id,
            "error": error,
            "saved_knowledge_ids": saved_knowledge_ids,  # saved knowledge IDs, returned to the caller
            "stats": {
                "total_messages": final_trace.total_messages if final_trace else 0,
                "total_tokens": final_trace.total_tokens if final_trace else 0,
                "total_cost": final_trace.total_cost if final_trace else 0.0,
            },
        }
    async def stop(self, trace_id: str) -> bool:
        """
        Stop a running trace.

        Sets the cancel signal; the agent loop checks it before the next LLM call and exits.
        The trace status is set to "stopped".

        Returns:
            True if the stop signal was sent, False if the trace is not running.
        """
        cancel_event = self._cancel_events.get(trace_id)
        if cancel_event is None:
            return False
        cancel_event.set()
        return True
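    # Cooperative-cancellation sketch: stop() merely sets the flag, so the loop
    # exits only after the in-flight LLM call returns. Hypothetical usage
    # (`collect` is an assumed helper that drains the run() iterator):
    #
    #   task = asyncio.create_task(collect(runner.run(msgs, cfg)))
    #   ...
    #   ok = await runner.stop(trace_id)   # False → trace was not running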
    # ===== Single call (retained) =====

    async def call(
        self,
        messages: List[Dict],
        model: str = "gpt-4o",
        tools: Optional[List[str]] = None,
        uid: Optional[str] = None,
        trace: bool = True,
        **kwargs
    ) -> CallResult:
        """
        Single LLM call (no agent loop).
        """
        if not self.llm_call:
            raise ValueError("llm_call function not provided")
        trace_id = None
        message_id = None
        tool_schemas = self._get_tool_schemas(tools)
        if trace and self.trace_store:
            trace_obj = Trace.create(mode="call", uid=uid, model=model, tools=tool_schemas, llm_params=kwargs)
            trace_id = await self.trace_store.create_trace(trace_obj)
        result = await self.llm_call(messages=messages, model=model, tools=tool_schemas, **kwargs)
        if trace and self.trace_store and trace_id:
            msg = Message.create(
                trace_id=trace_id, role="assistant", sequence=1, goal_id=None,
                content={"text": result.get("content", ""), "tool_calls": result.get("tool_calls")},
                prompt_tokens=result.get("prompt_tokens", 0),
                completion_tokens=result.get("completion_tokens", 0),
                finish_reason=result.get("finish_reason"),
                cost=result.get("cost", 0),
            )
            message_id = await self.trace_store.add_message(msg)
            await self.trace_store.update_trace(trace_id, status="completed", completed_at=datetime.now())
        return CallResult(
            reply=result.get("content", ""),
            tool_calls=result.get("tool_calls"),
            trace_id=trace_id,
            step_id=message_id,
            tokens={"prompt": result.get("prompt_tokens", 0), "completion": result.get("completion_tokens", 0)},
            cost=result.get("cost", 0)
        )
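    # Single-call sketch: no agent loop, optional tracing. The returned CallResult
    # carries the reply plus token/cost accounting (assumes `runner` exists):
    #
    #   res = await runner.call(
    #       [{"role": "user", "content": "Summarize this repo"}],
    #       model="gpt-4o",
    #       trace=False,  # skip trace persistence entirely
    #   )
    #   print(res.reply, res.tokens, res.cost)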
    # ===== Phase 1: PREPARE TRACE =====

    async def _prepare_trace(
        self,
        messages: List[Dict],
        config: RunConfig,
    ) -> Tuple[Trace, Optional[GoalTree], int]:
        """
        Prepare the trace: create a new one or load an existing one.

        Returns:
            (trace, goal_tree, next_sequence)
        """
        if config.trace_id:
            return await self._prepare_existing_trace(config)
        else:
            return await self._prepare_new_trace(messages, config)
    async def _prepare_new_trace(
        self,
        messages: List[Dict],
        config: RunConfig,
    ) -> Tuple[Trace, Optional[GoalTree], int]:
        """Create a new trace."""
        trace_id = str(uuid.uuid4())
        # Generate a task name
        task_name = config.name or await self._generate_task_name(messages)
        # Prepare the tool schemas
        tool_schemas = self._get_tool_schemas(config.tools, config.tool_groups, config.exclude_tools)
        trace_obj = Trace(
            trace_id=trace_id,
            mode="agent",
            task=task_name,
            agent_type=config.agent_type,
            parent_trace_id=config.parent_trace_id,
            parent_goal_id=config.parent_goal_id,
            uid=config.uid,
            model=config.model,
            tools=tool_schemas,
            llm_params={"temperature": config.temperature, **config.extra_llm_params},
            context=config.context,
            status="running",
        )
        goal_tree = self.goal_tree or GoalTree(mission=task_name)
        if self.trace_store:
            await self.trace_store.create_trace(trace_obj)
            await self.trace_store.update_goal_tree(trace_id, goal_tree)
        return trace_obj, goal_tree, 1
    async def _prepare_existing_trace(
        self,
        config: RunConfig,
    ) -> Tuple[Trace, Optional[GoalTree], int]:
        """Load an existing trace (continue or rewind)."""
        if not self.trace_store:
            raise ValueError("trace_store required for continue/rewind")
        trace_obj = await self.trace_store.get_trace(config.trace_id)
        if not trace_obj:
            raise ValueError(f"Trace not found: {config.trace_id}")
        goal_tree = await self.trace_store.get_goal_tree(config.trace_id)
        if goal_tree is None:
            # Defensive fallback: the trace exists but goal.json is missing; create an empty tree
            goal_tree = GoalTree(mission=trace_obj.task or "Agent task")
            await self.trace_store.update_goal_tree(config.trace_id, goal_tree)
        # Decide the behavior automatically: after_sequence None or == head → continue; < head → rewind
        after_seq = config.after_sequence
        # If after_seq > head_sequence, the generator was force-closed before the store's
        # head_sequence was updated (it is still the initial value written in Phase 2).
        # Correct head_sequence with last_sequence so continuation sees the full history.
        if after_seq is not None and after_seq > trace_obj.head_sequence:
            trace_obj.head_sequence = trace_obj.last_sequence
            await self.trace_store.update_trace(
                config.trace_id, head_sequence=trace_obj.head_sequence
            )
        if after_seq is not None and after_seq < trace_obj.head_sequence:
            # Rewind mode
            sequence = await self._rewind(config.trace_id, after_seq, goal_tree)
        else:
            # Continue mode: start from last_sequence + 1
            sequence = trace_obj.last_sequence + 1
        # Set the status back to running
        await self.trace_store.update_trace(
            config.trace_id,
            status="running",
            completed_at=None,
        )
        trace_obj.status = "running"
        # Broadcast the status change to the frontend
        try:
            from agent.trace.websocket import broadcast_trace_status_changed
            await broadcast_trace_status_changed(config.trace_id, "running")
        except Exception:
            pass
        return trace_obj, goal_tree, sequence
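    # The three run modes from the class docstring map onto RunConfig like this
    # (the trace ID and sequence are illustrative):
    #
    #   RunConfig()                                      # new trace
    #   RunConfig(trace_id="t-123")                      # continue from the head
    #   RunConfig(trace_id="t-123", after_sequence=42)   # rewind: fork after message 42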
    # ===== Phase 2: BUILD HISTORY =====

    async def _build_history(
        self,
        trace_id: str,
        new_messages: List[Dict],
        goal_tree: Optional[GoalTree],
        config: RunConfig,
        sequence: int,
        side_branch_ctx: Optional[SideBranchContext] = None,
    ) -> Tuple[List[Dict], int, List[Message], int]:
        """
        Build the full LLM message history.

        1. Load main-path messages along the parent chain from head_sequence (continue/rewind)
        2. Build the system prompt (inject skills for new traces)
        3. For new traces: inject current experience at the end of the first user message
        4. Append the input messages (parent_sequence linked to the current head)
        5. If inside a side branch, appended messages are automatically marked as side-branch messages

        Returns:
            (history, next_sequence, created_messages, head_sequence)
            created_messages: Message objects newly created and persisted in this call, for run() to yield
            head_sequence: sequence of the current main-path head node
        """
        history: List[Dict] = []
        created_messages: List[Message] = []
        head_seq: Optional[int] = None  # sequence of the current main-path head node
        # 1. Load existing messages (via the main-path traversal)
        if config.trace_id and self.trace_store:
            trace_obj = await self.trace_store.get_trace(trace_id)
            if trace_obj and trace_obj.head_sequence > 0:
                main_path = await self.trace_store.get_main_path_messages(
                    trace_id, trace_obj.head_sequence
                )
                # Heal orphaned tool_calls (tool_call without tool_result after an interruption)
                main_path, sequence = await self._heal_orphaned_tool_calls(
                    main_path, trace_id, goal_tree, sequence,
                )
                history = [msg.to_llm_dict() for msg in main_path]
                if main_path:
                    head_seq = main_path[-1].sequence
        # 2. Build the system prompt / inject skills into it
        has_system = any(m.get("role") == "system" for m in history)
        has_system_in_new = any(m.get("role") == "system" for m in new_messages)
        if not has_system:
            if has_system_in_new:
                # The input already contains a system message; inject skills into it
                # (before it is persisted in step 3)
                augmented = []
                for msg in new_messages:
                    if msg.get("role") == "system":
                        base = msg.get("content") or ""
                        enriched = await self._build_system_prompt(config, base_prompt=base)
                        augmented.append({**msg, "content": enriched or base})
                    else:
                        augmented.append(msg)
                new_messages = augmented
            else:
                # No system message anywhere: build one automatically and prepend it to the history
                system_prompt = await self._build_system_prompt(config)
                if system_prompt:
                    history = [{"role": "system", "content": system_prompt}] + history
                    if self.trace_store:
                        system_msg = Message.create(
                            trace_id=trace_id, role="system", sequence=sequence,
                            goal_id=None, content=system_prompt,
                            parent_sequence=None,  # the system message is the root
                        )
                        await self.trace_store.add_message(system_msg)
                        created_messages.append(system_msg)
                    head_seq = sequence
                    sequence += 1
        # 3. Append the new messages (parent_sequence linked to the current head)
        for msg_dict in new_messages:
            history.append(msg_dict)
            if self.trace_store:
                # If inside a side branch, mark the message as a side-branch message
                if side_branch_ctx:
                    stored_msg = Message.create(
                        trace_id=trace_id,
                        role=msg_dict["role"],
                        sequence=sequence,
                        goal_id=goal_tree.current_id if goal_tree else None,
                        parent_sequence=head_seq,
                        branch_type=side_branch_ctx.type,
                        branch_id=side_branch_ctx.branch_id,
                        content=msg_dict.get("content"),
                    )
                    self.log.info(f"User appended a message inside side branch {side_branch_ctx.type}")
                else:
                    stored_msg = Message.from_llm_dict(
                        msg_dict, trace_id=trace_id, sequence=sequence,
                        goal_id=None, parent_sequence=head_seq,
                    )
                await self.trace_store.add_message(stored_msg)
                created_messages.append(stored_msg)
            head_seq = sequence
            sequence += 1
        # 4. Update the trace's head_sequence
        if self.trace_store and head_seq is not None:
            await self.trace_store.update_trace(trace_id, head_sequence=head_seq)
        return history, sequence, created_messages, head_seq or 0
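    # Shape of the main path built above: each persisted message links to its
    # predecessor via parent_sequence, so the history is the chain walked back
    # from head_sequence. Illustrative example (sequence numbers are hypothetical):
    #
    #   seq 1 (system, parent=None) ← seq 2 (user, parent=1) ← seq 3 (assistant, parent=2)
    #                                                           ↑ head_sequence = 3
    #
    # A rewind with after_sequence=2 appends a new message with parent_sequence=2,
    # leaving seq 3 on an abandoned branch off the main path.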
    # ===== Phase 3: AGENT LOOP =====

    async def _manage_context_usage(
        self,
        trace_id: str,
        history: List[Dict],
        goal_tree: Optional[GoalTree],
        config: RunConfig,
        sequence: int,
        head_seq: int,
    ) -> Tuple[List[Dict], int, int, bool]:
        """
        Manage context usage: check, warn, compress.

        Returns:
            (updated_history, new_head_seq, next_sequence, needs_enter_compression_branch)
        """
        compression_config = CompressionConfig()
        token_count = estimate_tokens(history)
        max_tokens = compression_config.get_max_tokens(config.model)
        # Compute the usage percentage
        progress_pct = (token_count / max_tokens * 100) if max_tokens > 0 else 0
        msg_count = len(history)
        img_count = sum(
            1
            for msg in history
            if isinstance(msg.get("content"), list)
            for part in msg["content"]
            if isinstance(part, dict) and part.get("type") in ("image", "image_url")
        )
        # Update the context usage snapshot
        self._context_usage[trace_id] = ContextUsage(
            trace_id=trace_id,
            message_count=msg_count,
            token_count=token_count,
            max_tokens=max_tokens,
            usage_percent=progress_pct,
            image_count=img_count,
        )
        # Threshold warnings (30%, 50%, 80%)
        if trace_id not in self._context_warned:
            self._context_warned[trace_id] = set()
        for threshold in [30, 50, 80]:
            if progress_pct >= threshold and threshold not in self._context_warned[trace_id]:
                self._context_warned[trace_id].add(threshold)
                self.log.warning(
                    f"Context usage reached {threshold}%: {token_count:,} / {max_tokens:,} tokens ({msg_count} messages)"
                )
        # Check whether compression is needed (based on token count only)
        needs_compression = token_count > max_tokens
        if not needs_compression:
            return history, head_seq, sequence, False
        # Check for pending knowledge entries (they must be evaluated before compression)
        if self.trace_store and not config.force_side_branch:
            pending = await self.trace_store.get_pending_knowledge_entries(trace_id)
            if pending:
                # Side-branch queue: reflection → knowledge eval → compression.
                # Reflection goes first so that new knowledge produced by goals completed
                # during reflection is also evaluated before compression.
                if config.knowledge.enable_extraction:
                    config.force_side_branch = ["reflection", "knowledge_eval", "compression"]
                else:
                    config.force_side_branch = ["knowledge_eval", "compression"]
                # Record the trigger event in trace.context
                trace = await self.trace_store.get_trace(trace_id)
                if trace:
                    if not trace.context:
                        trace.context = {}
                    trace.context["knowledge_eval_trigger"] = "compression"
                    await self.trace_store.update_trace(trace_id, context=trace.context)
                self.log.info(f"[Knowledge Eval] Triggered before compression; pending entries: {len(pending)}")
                return history, head_seq, sequence, True
        # Knowledge extraction: before any compression, reflect over the full history
        # (enter the reflection side branch)
        if config.knowledge.enable_extraction and not config.force_side_branch:
            # Side-branch queue: reflect first, then compress
            config.force_side_branch = ["reflection", "compression"]
            return history, head_seq, sequence, True
        # From here on: reflection is disabled and compression is needed. Run Level 1
        # compression inline, then check whether Level 2 is required (side branch).
        # Level 1 compression: drop messages of completed goals
        if config.goal_compression != "none" and self.trace_store and goal_tree:
            if head_seq > 0:
                main_path_msgs = await self.trace_store.get_main_path_messages(
                    trace_id, head_seq
                )
                compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
                if len(compressed_msgs) < len(main_path_msgs):
                    self.log.info(
                        "Level 1 compression: %d -> %d messages",
                        len(main_path_msgs), len(compressed_msgs),
                    )
                    history = [msg.to_llm_dict() for msg in compressed_msgs]
                else:
                    self.log.info(
                        "Level 1 compression: nothing to filter (all %d messages kept)",
                        len(main_path_msgs),
                    )
        elif needs_compression:
            self.log.warning(
                "Token count (%d) exceeds the threshold but Level 1 compression cannot run "
                "(missing store or goal_tree, or goal_compression=none)",
                token_count,
            )
        # Level 2 compression: check whether we are still over the threshold after Level 1.
        # Note: after Level 1, images must be re-optimized and tokens recounted.
        optimized_history_after = await self._optimize_images(history, config.model)
        token_count_after = estimate_tokens(optimized_history_after)
        needs_level2 = token_count_after > max_tokens
        if needs_level2:
            self.log.info(
                "Still over the threshold after Level 1 (token=%d/%d); entering the compression side branch",
                token_count_after, max_tokens,
            )
            # If no side branch is queued yet (knowledge extraction disabled), go straight to compression
            if not config.force_side_branch:
                config.force_side_branch = ["compression"]
            # Return the flag so the main loop enters the side branch
            return history, head_seq, sequence, True
        # After compression, log the final message list sent to the model
        self.log.info("Level 1 compression done; messages sent to the model:")
        for idx, msg in enumerate(history):
            role = msg.get("role", "unknown")
            content = msg.get("content", "")
            if isinstance(content, str):
                preview = content[:100] + ("..." if len(content) > 100 else "")
            elif isinstance(content, list):
                preview = f"[{len(content)} blocks]"
            else:
                preview = str(content)[:100]
            self.log.info(f"  [{idx}] {role}: {preview}")
        return history, head_seq, sequence, False
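    # Worked example of the ladder above, assuming get_max_tokens() returns
    # 200,000 for the model: at token_count=65,000 usage is 32.5%, so the 30%
    # warning fires once. At 210,000 tokens needs_compression is True; with
    # extraction disabled and no pending knowledge, Level 1 runs inline, and
    # Level 2 (the compression side branch) is queued only if the re-estimated
    # count still exceeds 200,000.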
    async def _build_knowledge_eval_prompt(
        self,
        trace_id: str,
        goal_tree: Optional[GoalTree]
    ) -> str:
        """Build the knowledge-evaluation prompt."""
        if not self.trace_store:
            return ""
        pending = await self.trace_store.get_pending_knowledge_entries(trace_id)
        if not pending:
            return ""
        # Get the mission
        trace = await self.trace_store.get_trace(trace_id)
        mission = trace.task if trace else "unknown task"
        # Get the current goal
        current_goal = goal_tree.find(goal_tree.current_id) if goal_tree and goal_tree.current_id else None
        goal_desc = current_goal.description if current_goal else "no current goal"
        # Build the knowledge list
        knowledge_list = []
        for idx, entry in enumerate(pending, 1):
            knowledge_list.append(
                f"### Knowledge {idx}: {entry['knowledge_id']}\n"
                f"- task: {entry['task']}\n"
                f"- content: {entry['content']}\n"
                f"- injected at: sequence {entry['injected_at_sequence']}, goal {entry['goal_id']}"
            )
        prompt = f"""You are a knowledge-evaluation assistant. Evaluate how each piece of knowledge below actually affected this task run.

## Current task (Mission)
{mission}

## Current Goal
{goal_desc}

## Knowledge entries to evaluate
{chr(10).join(knowledge_list)}

## Evaluation dimensions
1. **helpfulness**: did the knowledge substantially help complete the task?
2. **relevance**: did the execution actually reflect the content of this knowledge?

## Evaluation categories
- irrelevant: the knowledge's task is unrelated to the current task
- unused: relevant but not used
- helpful: helped
- harmful: had a negative effect
- neutral: no noticeable effect

## Output format
Output the evaluation directly as JSON:
{{
  "evaluations": [
    {{
      "knowledge_id": "knowledge-xxx",
      "eval_status": "helpful",
      "reason": "1-2 sentences of rationale"
    }}
  ]
}}
"""
        return prompt
    async def _single_turn_compress(
        self,
        trace_id: str,
        history: List[Dict],
        goal_tree: Optional[GoalTree],
        config: RunConfig,
    ) -> str:
        """Generate a compression summary with a single LLM call; returns the summary text."""
        self.log.info("Running single-call LLM compression")
        # Build the compression prompt (uses SINGLE_TURN_PROMPT)
        from agent.core.prompts import build_single_turn_prompt
        goal_prompt = goal_tree.to_prompt(include_summary=True) if goal_tree else ""
        compress_prompt = build_single_turn_prompt(goal_prompt)
        compress_messages = list(history) + [
            {"role": "user", "content": compress_prompt}
        ]
        # Apply prompt caching
        compress_messages = self._add_cache_control(
            compress_messages, config.model, config.enable_prompt_caching
        )
        # Single LLM call (no tools)
        result = await self.llm_call(
            messages=compress_messages,
            model=config.model,
            tools=[],  # no tools provided
            temperature=config.temperature,
            **config.extra_llm_params,
        )
        summary_text = result.get("content", "").strip()
        # Extract the [[SUMMARY]] block
        if "[[SUMMARY]]" in summary_text:
            summary_text = summary_text[
                summary_text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
            ].strip()
        return summary_text
    @staticmethod
    def _try_fix_json(s: str) -> Optional[dict]:
        """Try to repair common JSON truncation/format problems; return a dict or None."""
        import re
        fixed = s.strip()

        # 1. Fix unescaped quotes inside values (e.g. "key": "he said "hello" to me").
        #    Strategy: find bare quotes inside the value strings of key-value pairs and escape them.
        def _fix_inner_quotes(text: str) -> str:
            # Scan `"..."` strings and fix unescaped quotes inside values
            result = []
            i = 0
            while i < len(text):
                # Found the start of a string
                if text[i] == '"':
                    # Find where this string is supposed to end
                    j = i + 1
                    while j < len(text):
                        if text[j] == '\\':
                            j += 2  # skip the escaped character
                            continue
                        if text[j] == '"':
                            break
                        j += 1
                    # Check whether the closing quote is followed by a legal JSON separator
                    if j < len(text):
                        after = j + 1
                        # Skip whitespace
                        while after < len(text) and text[after] in ' \t\n\r':
                            after += 1
                        if after < len(text) and text[after] not in ':,}]\n\r':
                            # Not the real closing quote; search forward for the next
                            # quote that is followed by a legal separator
                            k = j + 1
                            found_end = False
                            while k < len(text):
                                if text[k] == '"':
                                    peek = k + 1
                                    while peek < len(text) and text[peek] in ' \t\n\r':
                                        peek += 1
                                    if peek >= len(text) or text[peek] in ':,}]':
                                        # This is the real closing quote; escape the quotes in between
                                        inner = text[i + 1:k].replace('"', '\\"')
                                        result.append('"' + inner + '"')
                                        i = k + 1
                                        found_end = True
                                        break
                                k += 1
                            if found_end:
                                continue
                        else:
                            # Well-formed string: copy it through unchanged and skip past it
                            result.append(text[i:j + 1])
                            i = j + 1
                            continue
                result.append(text[i])
                i += 1
            return ''.join(result)

        fixed = _fix_inner_quotes(fixed)
        # 2. Strip trailing commas
        fixed = re.sub(r',\s*([}\]])', r'\1', fixed)
        # 3. Try to complete truncated strings and brackets
        for suffix in ['', '"', '"}', '"]', '"}]', '"}}']:
            try:
                attempt = fixed + suffix
                open_braces = attempt.count('{') - attempt.count('}')
                open_brackets = attempt.count('[') - attempt.count(']')
                attempt += '}' * max(0, open_braces) + ']' * max(0, open_brackets)
                result = json.loads(attempt)
                if isinstance(result, dict):
                    logger.info(f"[JSON Fix] Repaired JSON successfully (suffix={repr(suffix)})")
                    return result
            except json.JSONDecodeError:
                continue
        return None
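    # Illustrative repairs this helper can make (inputs and outputs are examples,
    # not exhaustive behavior):
    #
    #   _try_fix_json('{"a": "he said "hi" ok"}')   → {"a": 'he said "hi" ok'}
    #   _try_fix_json('{"a": 1,}')                  → {"a": 1}
    #   _try_fix_json('{"a": ["x", "y')             → {"a": ["x", "y"]}
    #
    # Truly unrecoverable input returns None rather than raising.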
  887. async def _agent_loop(
  888. self,
  889. trace: Trace,
  890. history: List[Dict],
  891. goal_tree: Optional[GoalTree],
  892. config: RunConfig,
  893. sequence: int,
  894. inject_skills: Optional[List[str]] = None,
  895. skill_recency_threshold: int = 10,
  896. ) -> AsyncIterator[Union[Trace, Message]]:
  897. """ReAct 循环"""
  898. trace_id = trace.trace_id
  899. tool_schemas = self._get_tool_schemas(config.tools, config.tool_groups, config.exclude_tools)
  900. # 当前主路径头节点的 sequence(用于设置 parent_sequence)
  901. head_seq = trace.head_sequence
  902. # 侧分支状态(None = 主路径)
  903. side_branch_ctx: Optional[SideBranchContext] = None
  904. # 检查是否有未完成的侧分支需要恢复
  905. if trace.context.get("active_side_branch"):
  906. side_branch_data = trace.context["active_side_branch"]
  907. branch_id = side_branch_data["branch_id"]
  908. start_sequence = side_branch_data["start_sequence"]
  909. # 从数据库查询侧分支消息(按 sequence 范围)
  910. if self.trace_store:
  911. all_messages = await self.trace_store.get_trace_messages(trace_id)
  912. side_messages = [
  913. m for m in all_messages
  914. if m.sequence >= start_sequence
  915. ]
  916. # 恢复侧分支上下文
  917. side_branch_ctx = SideBranchContext(
  918. type=side_branch_data["type"],
  919. branch_id=branch_id,
  920. start_head_seq=side_branch_data["start_head_seq"],
  921. start_sequence=side_branch_data["start_sequence"],
  922. start_history_length=0, # 稍后重新计算
  923. start_iteration=side_branch_data.get("start_iteration", 0),
  924. max_turns=side_branch_data.get("max_turns", config.side_branch_max_turns),
  925. )
  926. self.log.info(
  927. f"恢复未完成的侧分支: {side_branch_ctx.type}, "
  928. f"max_turns={side_branch_ctx.max_turns}"
  929. )
  930. # 将侧分支消息追加到 history
  931. for m in side_messages:
  932. history.append(m.to_llm_dict())
  933. # 重新计算 start_history_length
  934. side_branch_ctx.start_history_length = len(history) - len(side_messages)
  935. break_after_side_branch = False # 侧分支退出后是否 break 主循环
  936. for iteration in range(config.max_iterations):
  937. # 更新活动时间(表明trace正在活跃运行)
  938. if self.trace_store:
  939. await self.trace_store.update_trace(
  940. trace_id,
  941. last_activity_at=datetime.now()
  942. )
  943. # 检查取消信号
  944. cancel_event = self._cancel_events.get(trace_id)
  945. if cancel_event and cancel_event.is_set():
  946. self.log.info(f"Trace {trace_id} stopped by user")
  947. if self.trace_store:
  948. await self.trace_store.update_trace(
  949. trace_id,
  950. status="stopped",
  951. head_sequence=head_seq,
  952. completed_at=datetime.now(),
  953. )
  954. # 广播状态变化给前端
  955. try:
  956. from agent.trace.websocket import broadcast_trace_status_changed
  957. await broadcast_trace_status_changed(trace_id, "stopped")
  958. except Exception:
  959. pass
  960. trace_obj = await self.trace_store.get_trace(trace_id)
  961. if trace_obj:
  962. yield trace_obj
  963. return
  964. # 检查Goal完成触发的知识评估
  965. if not side_branch_ctx and self.trace_store:
  966. trace = await self.trace_store.get_trace(trace_id)
  967. if trace and trace.context and trace.context.get("pending_knowledge_eval"):
  968. # 清除标志
  969. trace.context.pop("pending_knowledge_eval", None)
  970. await self.trace_store.update_trace(trace_id, context=trace.context)
  971. # 设置侧分支队列
  972. config.force_side_branch = ["knowledge_eval"]
  973. self.log.info("[Knowledge Eval] 检测到Goal完成触发,进入知识评估侧分支")
  974. # Context 管理(仅主路径)
  975. needs_enter_side_branch = False
  976. if not side_branch_ctx:
  977. # 侧分支退出后需要 break 主循环
  978. if break_after_side_branch and not config.force_side_branch:
  979. break
  980. # 检查是否强制进入侧分支(API 手动触发或自动压缩流程)
  981. if config.force_side_branch:
  982. needs_enter_side_branch = True
  983. self.log.info(f"强制进入侧分支: {config.force_side_branch}")
  984. else:
  985. # 正常的 context 管理逻辑
  986. history, head_seq, sequence, needs_enter_side_branch = await self._manage_context_usage(
  987. trace_id, history, goal_tree, config, sequence, head_seq
  988. )
  989. # 进入侧分支
  990. if needs_enter_side_branch and not side_branch_ctx:
  991. # 刷新 trace,获取 _manage_context_usage 可能写入 DB 的 knowledge_eval_trigger
  992. if self.trace_store:
  993. fresh = await self.trace_store.get_trace(trace_id)
  994. if fresh:
  995. trace = fresh
  996. # 从队列中取出第一个侧分支类型
  997. branch_type: Literal["compression", "reflection", "knowledge_eval"]
  998. if config.force_side_branch and isinstance(config.force_side_branch, list) and len(config.force_side_branch) > 0:
  999. branch_type = config.force_side_branch.pop(0) # type: ignore
  1000. self.log.info(f"从队列取出侧分支: {branch_type}, 剩余队列: {config.force_side_branch}")
  1001. elif config.knowledge.enable_extraction:
  1002. # 兼容旧的单值模式(如果 force_side_branch 是字符串)
  1003. branch_type = "reflection"
  1004. else:
  1005. # 自动触发:压缩
  1006. branch_type = "compression"
  1007. branch_id = f"{branch_type}_{uuid.uuid4().hex[:8]}"
  1008. side_branch_ctx = SideBranchContext(
  1009. type=branch_type,
  1010. branch_id=branch_id,
  1011. start_head_seq=head_seq,
  1012. start_sequence=sequence,
  1013. start_history_length=len(history),
  1014. start_iteration=iteration,
  1015. max_turns=config.side_branch_max_turns,
  1016. )
  1017. # 持久化侧分支状态
  1018. if self.trace_store:
  1019. # 获取触发事件(如果是 knowledge_eval 分支)
  1020. trigger_event = trace.context.get("knowledge_eval_trigger", "unknown") if branch_type == "knowledge_eval" else None
  1021. trace.context["active_side_branch"] = {
  1022. "type": side_branch_ctx.type,
  1023. "branch_id": side_branch_ctx.branch_id,
  1024. "start_head_seq": side_branch_ctx.start_head_seq,
  1025. "start_sequence": side_branch_ctx.start_sequence,
  1026. "start_iteration": side_branch_ctx.start_iteration,
  1027. "max_turns": side_branch_ctx.max_turns,
  1028. "started_at": datetime.now().isoformat(),
  1029. }
  1030. # 如果是 knowledge_eval 分支,添加 trigger_event
  1031. if trigger_event:
  1032. trace.context["active_side_branch"]["trigger_event"] = trigger_event
  1033. # 清除触发事件标记
  1034. trace.context.pop("knowledge_eval_trigger", None)
  1035. await self.trace_store.update_trace(
  1036. trace_id,
  1037. context=trace.context
  1038. )
  1039. # 追加侧分支 prompt
  1040. if branch_type == "reflection":
  1041. # 完成场景用全局复盘 prompt,压缩场景用阶段性反思 prompt
  1042. if break_after_side_branch:
  1043. prompt = config.knowledge.get_completion_reflect_prompt()
  1044. else:
  1045. prompt = config.knowledge.get_reflect_prompt()
  1046. elif branch_type == "knowledge_eval":
  1047. prompt = await self._build_knowledge_eval_prompt(trace_id, goal_tree)
  1048. else: # compression
  1049. from agent.trace.compaction import build_compression_prompt
  1050. prompt = build_compression_prompt(goal_tree)
  1051. branch_user_msg = Message.create(
  1052. trace_id=trace_id,
  1053. role="user",
  1054. sequence=sequence,
  1055. parent_sequence=head_seq,
  1056. goal_id=goal_tree.current_id if goal_tree else None,
  1057. branch_type=branch_type,
  1058. branch_id=branch_id,
  1059. content=prompt,
  1060. )
  1061. if self.trace_store:
  1062. await self.trace_store.add_message(branch_user_msg)
  1063. history.append(branch_user_msg.to_llm_dict())
  1064. head_seq = sequence
  1065. sequence += 1
  1066. self.log.info(f"进入侧分支: {branch_type}, branch_id={branch_id}")
  1067. continue # 跳过本轮,下一轮开始侧分支
  1068. # 构建 LLM messages(注入上下文,移除内部字段)
  1069. llm_messages = [{k: v for k, v in msg.items() if not k.startswith("_")} for msg in history]
  1070. # 优化已处理的图片(分级处理:保留/压缩/描述)
  1071. llm_messages = await self._optimize_images(llm_messages, config.model)
  1072. # 对历史消息应用 Prompt Caching
  1073. llm_messages = self._add_cache_control(
  1074. llm_messages,
  1075. config.model,
  1076. config.enable_prompt_caching
  1077. )
  1078. # 调用 LLM(等待完成后再检查 cancel 信号,不中断正在进行的调用)
  1079. result = await self.llm_call(
  1080. messages=llm_messages,
  1081. model=config.model,
  1082. tools=tool_schemas,
  1083. temperature=config.temperature,
  1084. **config.extra_llm_params,
  1085. )
  1086. response_content = result.get("content", "")
  1087. reasoning_content = result.get("reasoning_content", "")
  1088. tool_calls = result.get("tool_calls")
  1089. finish_reason = result.get("finish_reason")
  1090. prompt_tokens = result.get("prompt_tokens", 0)
  1091. completion_tokens = result.get("completion_tokens", 0)
  1092. step_cost = result.get("cost", 0)
  1093. cache_creation_tokens = result.get("cache_creation_tokens")
  1094. cache_read_tokens = result.get("cache_read_tokens")
  1095. # 周期性自动注入上下文(仅主路径)
  1096. if not side_branch_ctx and iteration % CONTEXT_INJECTION_INTERVAL == 0:
  1097. # 检查是否已经调用了 get_current_context
  1098. if tool_calls:
  1099. has_context_call = any(
  1100. tc.get("function", {}).get("name") == "get_current_context"
  1101. for tc in tool_calls
  1102. )
  1103. else:
  1104. has_context_call = False
  1105. tool_calls = []
  1106. if not has_context_call:
  1107. # 手动添加 get_current_context 工具调用
  1108. context_call_id = f"call_context_{uuid.uuid4().hex[:8]}"
  1109. tool_calls.append({
  1110. "id": context_call_id,
  1111. "type": "function",
  1112. "function": {"name": "get_current_context", "arguments": "{}"}
  1113. })
  1114. self.log.info(f"[周期性注入] 自动添加 get_current_context 工具调用 (iteration={iteration})")
  1115. # Skill 指定注入(仅主路径,首轮 iteration==0 时执行)
  1116. if not side_branch_ctx and inject_skills and iteration == 0:
  1117. skills_to_inject = self._check_skills_need_injection(
  1118. trace, inject_skills, history, skill_recency_threshold
  1119. )
  1120. if skills_to_inject:
  1121. if not tool_calls:
  1122. tool_calls = []
  1123. for skill_name in skills_to_inject:
  1124. skill_call_id = f"call_skill_{skill_name}_{uuid.uuid4().hex[:8]}"
  1125. tool_calls.append({
  1126. "id": skill_call_id,
  1127. "type": "function",
  1128. "function": {
  1129. "name": "skill",
  1130. "arguments": json.dumps({"skill_name": skill_name})
  1131. }
  1132. })
  1133. self.log.info(f"[Skill 指定注入] 自动添加 skill(\"{skill_name}\") 工具调用")
  1134. # 按需自动创建 root goal(仅主路径)
  1135. if not side_branch_ctx and goal_tree and not goal_tree.goals and tool_calls:
  1136. has_goal_call = any(
  1137. tc.get("function", {}).get("name") == "goal"
  1138. for tc in tool_calls
  1139. )
  1140. self.log.debug(f"[Auto Root Goal] Before tool execution: goal_tree.goals={len(goal_tree.goals)}, has_goal_call={has_goal_call}, tool_calls={[tc.get('function', {}).get('name') for tc in tool_calls]}")
  1141. if not has_goal_call:
  1142. mission = goal_tree.mission
  1143. root_desc = mission[:200] if len(mission) > 200 else mission
  1144. goal_tree.add_goals(
  1145. descriptions=[root_desc],
  1146. reasons=["系统自动创建:Agent 未显式创建目标"],
  1147. parent_id=None
  1148. )
  1149. if self.trace_store:
  1150. await self.trace_store.add_goal(trace_id, goal_tree.goals[0])
  1151. await self.trace_store.update_goal_tree(trace_id, goal_tree)
  1152. self.log.info(f"自动创建 root goal: {goal_tree.goals[0].id}(未自动 focus,等待模型决定)")
  1153. else:
  1154. self.log.debug(f"[Auto Root Goal] 检测到 goal 工具调用,跳过自动创建")
# Get the current goal_id
current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
# Record the assistant Message (parent_sequence points at the current head)
assistant_msg = Message.create(
trace_id=trace_id,
role="assistant",
sequence=sequence,
goal_id=current_goal_id,
parent_sequence=head_seq if head_seq > 0 else None,
branch_type=side_branch_ctx.type if side_branch_ctx else None,
branch_id=side_branch_ctx.branch_id if side_branch_ctx else None,
content={"text": response_content, "tool_calls": tool_calls, "reasoning_content": reasoning_content or None},
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
cache_creation_tokens=cache_creation_tokens,
cache_read_tokens=cache_read_tokens,
finish_reason=finish_reason,
cost=step_cost,
)
if self.trace_store:
await self.trace_store.add_message(assistant_msg)
# Record model usage
await self.trace_store.record_model_usage(
trace_id=trace_id,
sequence=sequence,
role="assistant",
model=config.model,
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
cache_read_tokens=cache_read_tokens or 0,
)
# Knowledge-eval side branch: detect and persist evaluation results immediately
if side_branch_ctx and side_branch_ctx.type == "knowledge_eval":
text = response_content if isinstance(response_content, str) else ""
eval_results = None
try:
eval_results = json.loads(text.strip())
if "evaluations" not in eval_results:
eval_results = None
except json.JSONDecodeError:
import re
json_match = re.search(r'```json\s*(\{.*?\})\s*```', text, re.DOTALL)
if json_match:
try:
eval_results = json.loads(json_match.group(1))
except json.JSONDecodeError:
pass
if not eval_results:
json_match = re.search(r'\{[^{]*"evaluations"[^}]*\[[^\]]*\][^}]*\}', text, re.DOTALL)
if json_match:
try:
eval_results = json.loads(json_match.group(0))
except json.JSONDecodeError:
pass
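# Expected model output for this branch (illustration; field values are examples):
# either bare JSON, a fenced block such as
#   ```json
#   {"evaluations": [{"knowledge_id": "k1", "eval_status": "...", "reason": "..."}]}
#   ```
# or a loose {"evaluations": [...]} fragment embedded in prose — the three parsing
# attempts above cover these cases in that order.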
if eval_results and self.trace_store:
current_trace = await self.trace_store.get_trace(trace_id)
trigger_event = current_trace.context.get("active_side_branch", {}).get("trigger_event", "unknown")
for eval_item in eval_results.get("evaluations", []):
await self.trace_store.update_knowledge_evaluation(
trace_id=trace_id,
knowledge_id=eval_item["knowledge_id"],
eval_result={
"eval_status": eval_item["eval_status"],
"reason": eval_item.get("reason", "")
},
trigger_event=trigger_event
)
  1222. self.log.info(f"[Knowledge Eval] 已写入 {len(eval_results.get('evaluations', []))} 条评估结果")
  1223. # 如果在侧分支,记录到 assistant_msg(已持久化,不需要额外维护)
  1224. yield assistant_msg
  1225. head_seq = sequence
  1226. sequence += 1
  1227. # 检查侧分支是否应该退出
  1228. if side_branch_ctx:
  1229. # 计算侧分支已执行的轮次
  1230. turns_in_branch = iteration - side_branch_ctx.start_iteration
  1231. should_exit = turns_in_branch >= side_branch_ctx.max_turns or not tool_calls
  1232. if turns_in_branch >= side_branch_ctx.max_turns:
  1233. self.log.warning(
  1234. f"侧分支 {side_branch_ctx.type} 达到最大轮次 "
  1235. f"{side_branch_ctx.max_turns},强制退出"
  1236. )
if should_exit and side_branch_ctx.type == "compression":
# === Compression side-branch exit (timeout and normal completion handled uniformly) ===
summary_text = ""
# 1. Extract from the current response
if response_content:
if "[[SUMMARY]]" in response_content:
summary_text = response_content[
response_content.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
].strip()
elif response_content.strip():
summary_text = response_content.strip()
# 2. Query persistent storage by sequence range
if not summary_text and self.trace_store:
all_messages = await self.trace_store.get_trace_messages(trace_id)
side_messages = [
m for m in all_messages
if m.sequence >= side_branch_ctx.start_sequence
]
for msg in reversed(side_messages):
if msg.role == "assistant" and isinstance(msg.content, dict):
text = msg.content.get("text", "")
if "[[SUMMARY]]" in text:
summary_text = text[text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):].strip()
break
elif text:
summary_text = text
break
# 3. Single LLM call
if not summary_text:
self.log.warning("Side branch produced no valid summary; falling back to single-call LLM compression")
pre_branch_history = history[:side_branch_ctx.start_history_length]
summary_text = await self._single_turn_compress(
trace_id, pre_branch_history, goal_tree, config,
)
# Create the main-path summary message and rebuild history
if summary_text:
# Scrub the side-branch stop instruction (verbatim match against the prompt text) so it does not leak into the main branch
summary_text = summary_text.replace(
"**生成摘要后立即停止,不要继续执行原有任务。**", ""
).strip()
from agent.core.prompts import build_summary_header
summary_content = build_summary_header(summary_text)
if goal_tree and goal_tree.goals:
goal_tree_detail = goal_tree.to_prompt(include_summary=True)
summary_content += f"\n\n## Current Plan\n\n{goal_tree_detail}"
# Use the first user message's sequence as the parent.
# On resume, get_main_path_messages walks back along the parent chain,
# so pointing at first_user skips all the compressed intermediate messages.
first_user_seq = None
if self.trace_store:
all_msgs = await self.trace_store.get_trace_messages(trace_id)
for m in all_msgs:
if m.role == "user":
first_user_seq = m.sequence
break
summary_msg = Message.create(
trace_id=trace_id,
role="user",
sequence=sequence,
parent_sequence=first_user_seq,
branch_type=None,
content=summary_content,
)
if self.trace_store:
await self.trace_store.add_message(summary_msg)
history = self._rebuild_history_after_compression(
history, summary_msg.to_llm_dict(), label="compression side branch"
)
head_seq = sequence
sequence += 1
else:
self.log.error("No compression strategy produced a valid summary; skipping compression")
# Roll history back to before the side branch started, so side-branch instructions do not leak into the main branch
history = history[:side_branch_ctx.start_history_length]
head_seq = side_branch_ctx.start_head_seq
# Cleanup
trace.context.pop("active_side_branch", None)
config.force_side_branch = None
if self.trace_store:
await self.trace_store.update_trace(
trace_id, context=trace.context, head_sequence=head_seq,
)
side_branch_ctx = None
continue
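# Sequence-chain illustration (hypothetical numbers): before compression the main
# path is 1(user) -> 2(assistant) -> ... -> 41(assistant); the summary is written
# as 42(user, parent_sequence=1). A resume that walks parent links from head 42
# therefore reconstructs only [1, 42], skipping the compressed messages 2..41.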
elif should_exit and side_branch_ctx.type == "reflection":
# === Reflection side-branch exit (timeout and normal completion handled uniformly) ===
self.log.info("Reflection side branch exiting")
# auto-commit hook: by default, pending entries wait for human review,
# but with reflect_auto_commit=True they are all treated as approved and bulk-uploaded directly.
if (
self.trace_store
and getattr(config.knowledge, "reflect_auto_commit", False)
):
try:
from agent.trace.extraction_review import auto_commit_branch
report = await auto_commit_branch(
self.trace_store,
trace_id,
side_branch_ctx.branch_id,
)
if report.committed or report.failed:
self.log.info(
f"[auto-commit] committed={len(report.committed)} "
f"failed={len(report.failed)} skipped={len(report.skipped)}"
)
except Exception as e:
self.log.error(f"[auto-commit] Auto-commit of the reflection branch failed: {e}")
# Restore the main path
if self.trace_store:
main_path_messages = await self.trace_store.get_main_path_messages(
trace_id, side_branch_ctx.start_head_seq
)
history = [m.to_llm_dict() for m in main_path_messages]
head_seq = side_branch_ctx.start_head_seq
# Cleanup
trace.context.pop("active_side_branch", None)
if not config.force_side_branch or len(config.force_side_branch) == 0:
config.force_side_branch = None
self.log.info("Reflection complete; queue is empty")
if self.trace_store:
await self.trace_store.update_trace(
trace_id, context=trace.context, head_sequence=head_seq,
)
side_branch_ctx = None
continue
elif should_exit and side_branch_ctx.type == "knowledge_eval":
# === Knowledge-eval side-branch exit ===
self.log.info("Knowledge-eval side branch exiting")
# Restore the main path
if self.trace_store:
main_path_messages = await self.trace_store.get_main_path_messages(
trace_id, side_branch_ctx.start_head_seq
)
history = [m.to_llm_dict() for m in main_path_messages]
head_seq = side_branch_ctx.start_head_seq
# Cleanup
trace.context.pop("active_side_branch", None)
if not config.force_side_branch or len(config.force_side_branch) == 0:
config.force_side_branch = None
self.log.info("Knowledge evaluation complete; queue is empty")
if self.trace_store:
await self.trace_store.update_trace(
trace_id, context=trace.context, head_sequence=head_seq,
)
side_branch_ctx = None
continue
# Handle tool calls
# Truncation fallback: finish_reason == "length" means the response was cut off by
# max_tokens, so the tool-call arguments are likely incomplete and must not be
# executed; instead, hint the model to split the work into smaller batches
if tool_calls and finish_reason == "length":
self.log.warning(
"[Runner] Response truncated by max_tokens; skipping %d incomplete tool calls",
len(tool_calls),
)
truncation_hint = TRUNCATION_HINT
history.append({
"role": "assistant",
"content": response_content,
"tool_calls": tool_calls,
})
# Return an error result for every truncated tool call
for tc in tool_calls:
history.append({
"role": "tool",
"tool_call_id": tc["id"],
"content": truncation_hint,
})
continue
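# Rationale (chat-completions API contract): every tool_call id in an assistant
# message must be answered by a tool message with a matching tool_call_id, or the
# next request is rejected. Hence a hint is appended per call rather than dropping
# the truncated assistant message outright.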
  1405. if tool_calls and config.auto_execute_tools:
  1406. history.append({
  1407. "role": "assistant",
  1408. "content": response_content,
  1409. "tool_calls": tool_calls,
  1410. })
if config.parallel_tool_execution:
# === Parallel execution ===
  1413. current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
  1414. async def _execute_single_tool(tc: dict) -> tuple:
  1415. tool_name = tc["function"]["name"]
  1416. tool_args = tc["function"]["arguments"]
  1417. if isinstance(tool_args, str):
  1418. if not tool_args.strip():
  1419. tool_args = {}
  1420. else:
  1421. try:
  1422. tool_args = json.loads(tool_args)
except json.JSONDecodeError:
tool_args = self._try_fix_json(tool_args)
if tool_args is None:
# Capture the raw arguments before overwriting them below
raw_args = tc["function"]["arguments"]
self.log.warning(f"[Tool Call] JSON parse failed: {raw_args[:200]}")
tc["function"]["arguments"] = json.dumps({"_error": "JSON parse failed", "_raw": raw_args[:200]}, ensure_ascii=False)
return (tc, None, f"Error: tool arguments are not valid JSON and could not be parsed. Raw arguments: {raw_args[:200]}")
  1429. elif tool_args is None:
  1430. tool_args = {}
  1431. args_str = json.dumps(tool_args, ensure_ascii=False)
  1432. args_display = args_str[:100] + "..." if len(args_str) > 100 else args_str
  1433. self.log.info(f"[Tool Call] {tool_name}({args_display})")
  1434. trigger_event_for_tool = None
  1435. if side_branch_ctx and side_branch_ctx.type == "knowledge_eval" and self.trace_store:
  1436. current_trace = await self.trace_store.get_trace(trace_id)
  1437. if current_trace:
  1438. trigger_event_for_tool = current_trace.context.get("active_side_branch", {}).get("trigger_event", "unknown")
  1439. if tool_name in ("toolhub_call", "toolhub_search", "toolhub_health"):
  1440. try:
  1441. from agent.tools.builtin.toolhub import set_trace_context
  1442. set_trace_context(trace_id)
  1443. except ImportError:
  1444. pass
  1445. try:
  1446. tool_result = await self.tools.execute(
  1447. tool_name, tool_args, uid=config.uid or "",
  1448. context={"store": self.trace_store, "trace_id": trace_id, "goal_id": current_goal_id, "runner": self, "goal_tree": goal_tree, "knowledge_config": config.knowledge, "sequence": sequence, "side_branch": {"type": side_branch_ctx.type, "branch_id": side_branch_ctx.branch_id, "is_side_branch": True, "max_turns": side_branch_ctx.max_turns, "trigger_event": trigger_event_for_tool} if side_branch_ctx else None, **(config.context or {})}
  1449. )
  1450. return (tc, tool_args, tool_result)
  1451. except Exception as e:
  1452. import traceback
  1453. return (tc, tool_args, f"Error executing tool {tool_name}: {str(e)}\n{traceback.format_exc()}")
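# Ordering note: asyncio.gather preserves the order of its awaitables, so
# results[i] corresponds to tool_calls[i] and the tool messages below are
# appended to history in the original call order even though execution overlapped.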
  1454. tasks = [_execute_single_tool(tc) for tc in tool_calls]
  1455. results = await asyncio.gather(*tasks)
  1456. for res in results:
  1457. tc, tool_args, tool_result = res
  1458. tool_name = tc["function"]["name"]
  1459. if tool_args is None:
  1460. history.append({"role": "tool", "tool_call_id": tc["id"], "name": tool_name, "content": tool_result})
  1461. yield Message.create(trace_id=trace_id, role="tool", sequence=sequence, parent_sequence=head_seq, tool_call_id=tc["id"], content=tool_result)
  1462. head_seq = sequence
  1463. sequence += 1
  1464. continue
  1465. if tool_name == "goal" and goal_tree:
  1466. self.log.debug(f"[Goal Tool] After execution: goal_tree.goals={len(goal_tree.goals)}, current_id={goal_tree.current_id}")
if tool_name == "upload_knowledge" and isinstance(tool_result, dict):
self.log.info("[Knowledge Tracking] Knowledge uploaded")
  1469. if isinstance(tool_result, str):
  1470. tool_result = {"text": tool_result}
  1471. elif not isinstance(tool_result, dict):
  1472. tool_result = {"text": str(tool_result)}
  1473. tool_text = tool_result.get("text", str(tool_result))
  1474. tool_images = tool_result.get("images", [])
  1475. tool_usage = tool_result.get("tool_usage")
  1476. if tool_images:
  1477. tool_result_text = tool_text
  1478. tool_content_for_llm = [{"type": "text", "text": tool_text}]
  1479. for img in tool_images:
  1480. if img.get("type") == "base64" and img.get("data"):
  1481. media_type = img.get("media_type", "image/png")
  1482. tool_content_for_llm.append({"type": "image_url", "image_url": {"url": f"data:{media_type};base64,{img['data']}"}})
  1483. elif img.get("type") == "url" and img.get("url"):
  1484. tool_content_for_llm.append({"type": "image_url", "image_url": {"url": img["url"]}})
  1485. else:
  1486. tool_result_text = tool_text
  1487. tool_content_for_llm = tool_text
  1488. tool_msg = Message.create(trace_id=trace_id, role="tool", sequence=sequence, goal_id=current_goal_id, parent_sequence=head_seq, tool_call_id=tc["id"], branch_type=side_branch_ctx.type if side_branch_ctx else None, branch_id=side_branch_ctx.branch_id if side_branch_ctx else None, content={"tool_name": tool_name, "result": tool_content_for_llm})
  1489. if self.trace_store:
  1490. await self.trace_store.add_message(tool_msg)
  1491. if tool_usage:
  1492. await self.trace_store.record_model_usage(trace_id=trace_id, sequence=sequence, role="tool", tool_name=tool_name, model=tool_usage.get("model"), prompt_tokens=tool_usage.get("prompt_tokens", 0), completion_tokens=tool_usage.get("completion_tokens", 0), cache_read_tokens=tool_usage.get("cache_read_tokens", 0))
  1493. if tool_images:
  1494. import base64 as b64mod
  1495. for img in tool_images:
  1496. if img.get("data"):
  1497. png_path = self.trace_store._get_messages_dir(trace_id) / f"{tool_msg.message_id}.png"
  1498. png_path.write_bytes(b64mod.b64decode(img["data"]))
  1499. break
  1500. yield tool_msg
  1501. head_seq = sequence
  1502. sequence += 1
  1503. history.append({"role": "tool", "tool_call_id": tc["id"], "name": tool_name, "content": tool_content_for_llm, "_message_id": tool_msg.message_id})
if tool_name == "skill" and tc["id"].startswith("call_skill_"):
try:
skill_args = json.loads(tc["function"]["arguments"]) if isinstance(tc["function"]["arguments"], str) else tc["function"]["arguments"]
injected_skill_name = skill_args.get("skill_name", "")
if injected_skill_name:
await self._update_skill_injection_record(trace_id, trace, injected_skill_name, tool_msg.message_id, tool_msg.sequence)
self.log.info(f"[Skill injection] Recorded {injected_skill_name} → msg={tool_msg.message_id}")
except Exception as e:
self.log.warning(f"[Skill injection] Failed to record injection tracking: {e}")
  1513. else:
  1514. for tc in tool_calls:
  1515. current_goal_id = goal_tree.current_id if (goal_tree and goal_tree.current_id) else None
  1516. tool_name = tc["function"]["name"]
  1517. tool_args = tc["function"]["arguments"]
  1518. if isinstance(tool_args, str):
  1519. if not tool_args.strip():
  1520. tool_args = {}
  1521. else:
  1522. try:
  1523. tool_args = json.loads(tool_args)
except json.JSONDecodeError:
# Try to repair common truncation/formatting problems
tool_args = self._try_fix_json(tool_args)
if tool_args is None:
# Capture the raw arguments before overwriting them below
raw_args = tc["function"]["arguments"]
self.log.warning(f"[Tool Call] JSON parse failed, skipping tool call {tool_name}: {raw_args[:200]}")
# Replace the malformed JSON in the history's assistant message,
# otherwise the Qwen API rejects it with "function.arguments must be in JSON format"
tc["function"]["arguments"] = json.dumps(
{"_error": "JSON parse failed", "_raw": raw_args[:200]},
ensure_ascii=False,
)
history.append({
"role": "tool",
"tool_call_id": tc["id"],
"content": f"Error: tool arguments are not valid JSON and could not be parsed. Please regenerate the call with well-formed JSON arguments. Raw arguments: {raw_args[:200]}",
})
# Note: no Message is yielded here, since required fields are missing;
# Message persistence is managed uniformly through trace_store
continue
elif tool_args is None:
tool_args = {}
# Log the tool call (INFO level, with arguments shown)
args_str = json.dumps(tool_args, ensure_ascii=False)
args_display = args_str[:100] + "..." if len(args_str) > 100 else args_str
self.log.info(f"[Tool Call] {tool_name}({args_display})")
# Fetch trigger_event (when inside a knowledge_eval side branch)
trigger_event_for_tool = None
if side_branch_ctx and side_branch_ctx.type == "knowledge_eval" and self.trace_store:
current_trace = await self.trace_store.get_trace(trace_id)
if current_trace:
trigger_event_for_tool = current_trace.context.get("active_side_branch", {}).get("trigger_event", "unknown")
# Set the trace_id context for toolhub (images are saved under outputs/{trace_id}/)
if tool_name in ("toolhub_call", "toolhub_search", "toolhub_health"):
try:
from agent.tools.builtin.toolhub import set_trace_context
set_trace_context(trace_id)
except ImportError:
pass
tool_result = await self.tools.execute(
tool_name,
tool_args,
uid=config.uid or "",
context={
"store": self.trace_store,
"trace_id": trace_id,
"goal_id": current_goal_id,
"runner": self,
"goal_tree": goal_tree,
"knowledge_config": config.knowledge,
"sequence": sequence,  # sequence is used to record knowledge injection
# Side-branch info
"side_branch": {
"type": side_branch_ctx.type,
"branch_id": side_branch_ctx.branch_id,
"is_side_branch": True,
"max_turns": side_branch_ctx.max_turns,
"trigger_event": trigger_event_for_tool,
} if side_branch_ctx else None,
# Merge user-supplied context (RunConfig.context)
**(config.context or {}),
},
)
# If this was the goal tool, log the post-execution state
if tool_name == "goal" and goal_tree:
self.log.debug(f"[Goal Tool] After execution: goal_tree.goals={len(goal_tree.goals)}, current_id={goal_tree.current_id}")
# Track uploaded knowledge (via upload_knowledge)
if tool_name == "upload_knowledge" and isinstance(tool_result, dict):
# upload_knowledge returns aggregate statistics, not a single knowledge_id,
# so only the upload action is logged here; individual IDs are not tracked
self.log.info("[Knowledge Tracking] Knowledge uploaded to Knowledge Manager")
# --- Multimodal tool feedback ---
# execute() returns dict{"text","images","tool_usage"} or str;
# normalize everything to the dict form
if isinstance(tool_result, str):
tool_result = {"text": tool_result}
elif not isinstance(tool_result, dict):
tool_result = {"text": str(tool_result)}
tool_text = tool_result.get("text", str(tool_result))
tool_images = tool_result.get("images", [])
tool_usage = tool_result.get("tool_usage")  # extract tool_usage
# Build the multimodal message
if tool_images:
tool_result_text = tool_text
# Assemble the multimodal content blocks
tool_content_for_llm = [{"type": "text", "text": tool_text}]
  1608. for img in tool_images:
  1609. if img.get("type") == "base64" and img.get("data"):
  1610. media_type = img.get("media_type", "image/png")
  1611. tool_content_for_llm.append({
  1612. "type": "image_url",
  1613. "image_url": {
  1614. "url": f"data:{media_type};base64,{img['data']}"
  1615. }
  1616. })
  1617. elif img.get("type") == "url" and img.get("url"):
  1618. tool_content_for_llm.append({
  1619. "type": "image_url",
  1620. "image_url": {
  1621. "url": img["url"]
  1622. }
  1623. })
img_count = len(tool_content_for_llm) - 1  # minus the text block
print(f"[Runner] Multimodal tool feedback: tool={tool_name}, images={img_count}, text_len={len(tool_result_text)}")
else:
tool_result_text = tool_text
tool_content_for_llm = tool_text
  1629. tool_msg = Message.create(
  1630. trace_id=trace_id,
  1631. role="tool",
  1632. sequence=sequence,
  1633. goal_id=current_goal_id,
  1634. parent_sequence=head_seq,
  1635. tool_call_id=tc["id"],
  1636. branch_type=side_branch_ctx.type if side_branch_ctx else None,
  1637. branch_id=side_branch_ctx.branch_id if side_branch_ctx else None,
# Store the full content: keep the list (with image_url blocks) when images are present, a plain string otherwise
content={"tool_name": tool_name, "result": tool_content_for_llm},
  1640. )
  1641. if self.trace_store:
  1642. await self.trace_store.add_message(tool_msg)
# Record the tool's model usage
if tool_usage:
  1645. await self.trace_store.record_model_usage(
  1646. trace_id=trace_id,
  1647. sequence=sequence,
  1648. role="tool",
  1649. tool_name=tool_name,
  1650. model=tool_usage.get("model"),
  1651. prompt_tokens=tool_usage.get("prompt_tokens", 0),
  1652. completion_tokens=tool_usage.get("completion_tokens", 0),
  1653. cache_read_tokens=tool_usage.get("cache_read_tokens", 0),
  1654. )
# Save the screenshot separately as a PNG with the same name
if tool_images:
import base64 as b64mod
for img in tool_images:
if img.get("data"):
png_path = self.trace_store._get_messages_dir(trace_id) / f"{tool_msg.message_id}.png"
png_path.write_bytes(b64mod.b64decode(img["data"]))
print(f"[Runner] Screenshot saved: {png_path.name}")
break  # keep only the first image
# On a side branch, tool_msg is already persisted (no extra bookkeeping needed)
  1665. yield tool_msg
  1666. head_seq = sequence
  1667. sequence += 1
  1668. history.append({
  1669. "role": "tool",
  1670. "tool_call_id": tc["id"],
  1671. "name": tool_name,
  1672. "content": tool_content_for_llm,
  1673. "_message_id": tool_msg.message_id,
  1674. })
# Update the skill injection tracking record
if tool_name == "skill" and tc["id"].startswith("call_skill_"):
try:
skill_args = json.loads(tc["function"]["arguments"]) if isinstance(tc["function"]["arguments"], str) else tc["function"]["arguments"]
injected_skill_name = skill_args.get("skill_name", "")
if injected_skill_name:
await self._update_skill_injection_record(
trace_id, trace, injected_skill_name,
tool_msg.message_id, tool_msg.sequence,
)
self.log.info(f"[Skill injection] Recorded {injected_skill_name} → msg={tool_msg.message_id}")
except Exception as e:
self.log.warning(f"[Skill injection] Failed to record injection tracking: {e}")
# on_complete mode: compress a goal's messages immediately after goal(done=...)
  1689. if (
  1690. not side_branch_ctx
  1691. and config.goal_compression == "on_complete"
  1692. and self.trace_store
  1693. and goal_tree
  1694. ):
  1695. has_goal_done = False
  1696. for tc in tool_calls:
  1697. if tc["function"]["name"] != "goal":
  1698. continue
  1699. try:
  1700. raw = tc["function"]["arguments"]
  1701. args = json.loads(raw) if isinstance(raw, str) and raw.strip() else {}
  1702. except (json.JSONDecodeError, TypeError):
  1703. args = {}
  1704. if args.get("done") is not None:
  1705. has_goal_done = True
  1706. break
  1707. if has_goal_done:
  1708. main_path_msgs = await self.trace_store.get_main_path_messages(
  1709. trace_id, head_seq
  1710. )
  1711. compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
if len(compressed_msgs) < len(main_path_msgs):
self.log.info(
"on_complete compression: %d -> %d messages",
len(main_path_msgs), len(compressed_msgs),
)
history = [msg.to_llm_dict() for msg in compressed_msgs]
continue  # next loop iteration
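# compress_completed_goals (defined elsewhere) is expected to fold away messages
# that belong to goals already marked done, e.g. (hypothetical): a 40-message main
# path with one completed goal might come back as 12 messages, with that goal's
# span replaced by its summary. history is only rebuilt when the list actually shrank.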
# No tool calls.
# If we were on a side branch, this case was already handled above (we never reach here).
# Main path with no tool calls → task complete; check whether post-completion reflection or knowledge evaluation is needed.
# First, check for knowledge entries still awaiting evaluation
if not side_branch_ctx and self.trace_store:
pending = await self.trace_store.get_pending_knowledge_entries(trace_id)
if pending:
self.log.info(f"Task is about to finish, but {len(pending)} knowledge entries are still unevaluated; forcing an evaluation pass")
config.force_side_branch = ["knowledge_eval"]
trace = await self.trace_store.get_trace(trace_id)
if trace:
trace.context["knowledge_eval_trigger"] = "task_completion"
await self.trace_store.update_trace(trace_id, context=trace.context)
continue
if not side_branch_ctx and config.knowledge.enable_completion_extraction and not break_after_side_branch:
config.force_side_branch = ["reflection"]
break_after_side_branch = True
self.log.info("Task complete; entering the post-completion reflection side branch")
continue
break
# Clean up per-trace tracking data
  1740. self._context_warned.pop(trace_id, None)
  1741. self._context_usage.pop(trace_id, None)
  1742. self._saved_knowledge_ids.pop(trace_id, None)
# Update head_sequence and complete the Trace
  1744. if self.trace_store:
  1745. await self.trace_store.update_trace(
  1746. trace_id,
  1747. status="completed",
  1748. head_sequence=head_seq,
  1749. completed_at=datetime.now(),
  1750. )
  1751. trace_obj = await self.trace_store.get_trace(trace_id)
  1752. if trace_obj:
  1753. yield trace_obj
# ===== Compression helpers =====
def _rebuild_history_after_compression(
self,
history: List[Dict],
summary_msg_dict: Dict,
label: str = "compression",
) -> List[Dict]:
"""
Rebuild the history after compression: system prompt + first user message + summary
Args:
history: the history before compression
summary_msg_dict: the summary message as an LLM dict
label: log label
Returns:
the new history
"""
  1770. system_msg = None
  1771. first_user_msg = None
  1772. for msg in history:
  1773. if msg.get("role") == "system" and not system_msg:
  1774. system_msg = msg
  1775. elif msg.get("role") == "user" and not first_user_msg:
  1776. first_user_msg = msg
  1777. if system_msg and first_user_msg:
  1778. break
  1779. new_history = []
  1780. if system_msg:
  1781. new_history.append(system_msg)
  1782. if first_user_msg:
  1783. new_history.append(first_user_msg)
  1784. new_history.append(summary_msg_dict)
self.log.info(f"{label} done: {len(history)} → {len(new_history)} messages")
  1786. for idx, msg in enumerate(new_history):
  1787. role = msg.get("role", "unknown")
  1788. content = msg.get("content", "")
  1789. if isinstance(content, str):
  1790. preview = content
  1791. elif isinstance(content, list):
  1792. preview = f"[{len(content)} blocks]"
  1793. else:
  1794. preview = str(content)
self.log.info(f" after {label}[{idx}] {role}: {preview}")
  1796. return new_history
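# Resulting shape (illustration): a 120-message history collapses to at most
# [system, first user message, summary] — three entries. Everything in between
# stays in the trace store and remains reachable through the message tree; it is
# simply no longer sent to the LLM.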
# ===== Rewind =====
async def _rewind(
self,
trace_id: str,
after_sequence: int,
goal_tree: Optional[GoalTree],
) -> int:
"""
Perform a rewind: snapshot the GoalTree, rebuild a clean tree, set head_sequence.
New messages' parent_sequence will point at the rewind point; old messages drop off the main path naturally via the tree structure.
Returns:
the next available sequence number
"""
if not self.trace_store:
raise ValueError("trace_store required for rewind")
# 1. Load all messages (used for the safe cutoff and the max sequence)
all_messages = await self.trace_store.get_trace_messages(trace_id)
if not all_messages:
return 1
# 2. Find a safe cutoff (never cut between a tool_call and its tool response)
cutoff = self._find_safe_cutoff(all_messages, after_sequence)
# 3. Snapshot and rebuild the GoalTree
if goal_tree:
# Use the cutoff message's created_at as the time boundary
cutoff_msg = None
for msg in all_messages:
if msg.sequence == cutoff:
cutoff_msg = msg
break
cutoff_time = cutoff_msg.created_at if cutoff_msg else datetime.now()
# Snapshot into events (head_sequence included so the frontend can detect the branch switch)
await self.trace_store.append_event(trace_id, "rewind", {
"after_sequence": cutoff,
"head_sequence": cutoff,
"goal_tree_snapshot": goal_tree.to_dict(),
})
# Rebuild a clean GoalTree by time
new_tree = goal_tree.rebuild_for_rewind(cutoff_time)
await self.trace_store.update_goal_tree(trace_id, new_tree)
# Update the in-memory references
goal_tree.goals = new_tree.goals
goal_tree.current_id = new_tree.current_id
# 4. Move head_sequence to the rewind point
await self.trace_store.update_trace(trace_id, head_sequence=cutoff)
# 5. Return the next sequence (globally increasing, never reused)
max_seq = max((m.sequence for m in all_messages), default=0)
return max_seq + 1
def _find_safe_cutoff(self, messages: List[Message], after_sequence: int) -> int:
"""
Find a safe cutoff point.
If after_sequence points at an assistant message that carries tool_calls,
extend the cutoff past all of its corresponding tool responses.
"""
cutoff = after_sequence
# Locate the message for after_sequence
target_msg = None
for msg in messages:
if msg.sequence == after_sequence:
target_msg = msg
break
if not target_msg:
return cutoff
# If it is an assistant message with tool_calls, find all matching tool responses
if target_msg.role == "assistant":
content = target_msg.content
if isinstance(content, dict) and content.get("tool_calls"):
tool_call_ids = set()
for tc in content["tool_calls"]:
if isinstance(tc, dict) and tc.get("id"):
tool_call_ids.add(tc["id"])
# Find the tool messages answering those tool_calls
for msg in messages:
if (msg.role == "tool" and msg.tool_call_id
and msg.tool_call_id in tool_call_ids):
cutoff = max(cutoff, msg.sequence)
return cutoff
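# Example (hypothetical sequences): rewinding after sequence 7, where message 7 is
# an assistant turn whose tool_calls are answered by tool messages 8 and 9, moves
# the cutoff to 9 — the kept prefix never ends between a tool_call and its result.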
  1873. async def _heal_orphaned_tool_calls(
  1874. self,
  1875. messages: List[Message],
  1876. trace_id: str,
  1877. goal_tree: Optional[GoalTree],
  1878. sequence: int,
  1879. ) -> tuple:
  1880. """
  1881. 检测并修复消息历史中的 orphaned tool_calls。
  1882. 当 agent 被 stop/crash 中断时,可能有 assistant 的 tool_calls 没有对应的
  1883. tool results(包括多 tool_call 部分完成的情况)。直接发给 LLM 会导致 400。
  1884. 修复策略:为每个缺失的 tool_result 插入合成的"中断通知"消息,而非裁剪。
  1885. - 普通工具:简短中断提示
  1886. - agent/evaluate:包含 sub_trace_id、执行统计、continue_from 指引
  1887. 合成消息持久化到 store,确保幂等(下次续跑不再触发)。
  1888. Returns:
  1889. (healed_messages, next_sequence)
  1890. """
  1891. if not messages:
  1892. return messages, sequence
# Collect all tool_call IDs → (assistant_msg, tool_call_dict)
  1894. tc_map: Dict[str, tuple] = {}
  1895. result_ids: set = set()
  1896. for msg in messages:
  1897. if msg.role == "assistant":
  1898. content = msg.content
  1899. if isinstance(content, dict) and content.get("tool_calls"):
  1900. for tc in content["tool_calls"]:
  1901. tc_id = tc.get("id")
  1902. if tc_id:
  1903. tc_map[tc_id] = (msg, tc)
  1904. elif msg.role == "tool" and msg.tool_call_id:
  1905. result_ids.add(msg.tool_call_id)
  1906. orphaned_ids = [tc_id for tc_id in tc_map if tc_id not in result_ids]
  1907. if not orphaned_ids:
  1908. return messages, sequence
self.log.info(
"Detected %d orphaned tool_calls; generating synthetic interruption notices",
len(orphaned_ids),
)
  1913. healed = list(messages)
  1914. head_seq = messages[-1].sequence
  1915. for tc_id in orphaned_ids:
  1916. assistant_msg, tc = tc_map[tc_id]
  1917. tool_name = tc.get("function", {}).get("name", "unknown")
  1918. if tool_name in ("agent", "evaluate"):
  1919. result_text = self._build_agent_interrupted_result(
  1920. tc, goal_tree, assistant_msg,
  1921. )
  1922. else:
  1923. result_text = build_tool_interrupted_message(tool_name)
  1924. synthetic_msg = Message.create(
  1925. trace_id=trace_id,
  1926. role="tool",
  1927. sequence=sequence,
  1928. goal_id=assistant_msg.goal_id,
  1929. parent_sequence=head_seq,
  1930. tool_call_id=tc_id,
  1931. content={"tool_name": tool_name, "result": result_text},
  1932. )
  1933. if self.trace_store:
  1934. await self.trace_store.add_message(synthetic_msg)
  1935. healed.append(synthetic_msg)
  1936. head_seq = sequence
  1937. sequence += 1
# Update the trace head/last sequence
  1939. if self.trace_store:
  1940. await self.trace_store.update_trace(
  1941. trace_id,
  1942. head_sequence=head_seq,
  1943. last_sequence=max(head_seq, sequence - 1),
  1944. )
  1945. return healed, sequence
  1946. def _build_agent_interrupted_result(
  1947. self,
  1948. tc: Dict,
  1949. goal_tree: Optional[GoalTree],
  1950. assistant_msg: Message,
  1951. ) -> str:
  1952. """为中断的 agent/evaluate 工具调用构建合成结果(对齐正常返回值格式)"""
  1953. args_str = tc.get("function", {}).get("arguments", "{}")
  1954. try:
  1955. args = json.loads(args_str) if isinstance(args_str, str) else args_str
  1956. except json.JSONDecodeError:
  1957. args = {}
task = args.get("task", "unknown task")
  1959. if isinstance(task, list):
  1960. task = "; ".join(task)
  1961. tool_name = tc.get("function", {}).get("name", "agent")
  1962. mode = "evaluate" if tool_name == "evaluate" else "delegate"
# Look up sub_trace info from the goal_tree
  1964. sub_trace_id = None
  1965. stats = None
  1966. if goal_tree and assistant_msg.goal_id:
  1967. goal = goal_tree.find(assistant_msg.goal_id)
  1968. if goal and goal.sub_trace_ids:
  1969. first = goal.sub_trace_ids[0]
  1970. if isinstance(first, dict):
  1971. sub_trace_id = first.get("trace_id")
  1972. elif isinstance(first, str):
  1973. sub_trace_id = first
  1974. if goal.cumulative_stats:
  1975. s = goal.cumulative_stats
  1976. if s.message_count > 0:
  1977. stats = {
  1978. "message_count": s.message_count,
  1979. "total_tokens": s.total_tokens,
  1980. "total_cost": round(s.total_cost, 4),
  1981. }
  1982. result: Dict[str, Any] = {
  1983. "mode": mode,
  1984. "status": "interrupted",
  1985. "summary": AGENT_INTERRUPTED_SUMMARY,
  1986. "task": task,
  1987. }
  1988. if sub_trace_id:
  1989. result["sub_trace_id"] = sub_trace_id
  1990. result["hint"] = build_agent_continue_hint(sub_trace_id)
  1991. if stats:
  1992. result["stats"] = stats
  1993. return json.dumps(result, ensure_ascii=False, indent=2)
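# Example synthetic result (illustrative values only):
#   {
#     "mode": "delegate",
#     "status": "interrupted",
#     "summary": "...",                      # AGENT_INTERRUPTED_SUMMARY
#     "task": "collect cases",
#     "sub_trace_id": "tr_abc123",
#     "hint": "...",                         # build_agent_continue_hint(...)
#     "stats": {"message_count": 12, "total_tokens": 34567, "total_cost": 0.0123}
#   }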
# ===== Designated skill injection =====
  1996. def _check_skills_need_injection(
  1997. self,
  1998. trace: Trace,
  1999. inject_skills: List[str],
  2000. history: List[Dict],
  2001. recency_threshold: int,
  2002. ) -> List[str]:
  2003. """
  2004. 检查哪些 skill 需要注入。
  2005. 通过 trace.context["injected_skills"] 中记录的 message_id
  2006. 检查是否仍在当前 history 的最近 recency_threshold 条消息中。
  2007. Returns:
  2008. 需要注入的 skill 名称列表
  2009. """
  2010. injected = (trace.context or {}).get("injected_skills", {})
# Collect the message_ids of the most recent recency_threshold history entries
  2012. recent_msgs = history[-recency_threshold:] if recency_threshold > 0 else []
  2013. recent_ids = set()
  2014. for msg in recent_msgs:
  2015. mid = msg.get("message_id") or msg.get("_message_id")
  2016. if mid:
  2017. recent_ids.add(mid)
  2018. needs_inject = []
  2019. for skill_name in inject_skills:
  2020. record = injected.get(skill_name)
  2021. if not record:
  2022. needs_inject.append(skill_name)
  2023. continue
  2024. if record.get("message_id") not in recent_ids:
  2025. needs_inject.append(skill_name)
  2026. return needs_inject
  2027. async def _update_skill_injection_record(
  2028. self,
  2029. trace_id: str,
  2030. trace: Trace,
  2031. skill_name: str,
  2032. message_id: str,
  2033. sequence: int,
  2034. ):
  2035. """更新 trace.context 中的 skill 注入记录"""
  2036. if not trace.context:
  2037. trace.context = {}
  2038. if "injected_skills" not in trace.context:
  2039. trace.context["injected_skills"] = {}
  2040. trace.context["injected_skills"][skill_name] = {
  2041. "message_id": message_id,
  2042. "sequence": sequence,
  2043. }
  2044. if self.trace_store:
  2045. await self.trace_store.update_trace(trace_id, context=trace.context)
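# Bookkeeping shape in trace.context (example values):
#   {"injected_skills": {"web_search": {"message_id": "msg_1a2b", "sequence": 17}}}
# _check_skills_need_injection above re-injects a skill whenever its recorded
# message_id has scrolled out of the most recent recency_threshold history entries.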
# ===== Context injection =====
def _build_context_injection(
self,
trace: Trace,
goal_tree: Optional[GoalTree],
) -> str:
"""Build the periodically injected context (GoalTree + Active Collaborators + focus reminder + IM notifications)"""
from datetime import datetime
parts = [f"## Current Time\n\n{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"]
# GoalTree
if goal_tree and goal_tree.goals:
parts.append(f"## Current Plan\n\n{goal_tree.to_prompt()}")
if goal_tree.current_id:
# Focus sits on a parent goal that has children: remind the model to focus on a concrete child goal
children = goal_tree.get_children(goal_tree.current_id)
pending_children = [c for c in children if c.status in ("pending", "in_progress")]
if pending_children:
child_ids = ", ".join(
goal_tree._generate_display_id(c) for c in pending_children[:3]
)
parts.append(
f"**Reminder**: the current focus is on a parent goal; consider using `goal(focus=\"...\")` "
f"to switch to a concrete child goal (e.g. {child_ids}) before continuing."
)
else:
# No focus: remind the model to pick one
parts.append(
"**Reminder**: no goal is currently focused. Use `goal(focus=\"...\")` to pick a goal and start executing."
)
# Active Collaborators
collaborators = trace.context.get("collaborators", [])
if collaborators:
lines = ["## Active Collaborators"]
for c in collaborators:
status_str = c.get("status", "unknown")
ctype = c.get("type", "agent")
summary = c.get("summary", "")
name = c.get("name", "unnamed")
lines.append(f"- {name} [{ctype}, {status_str}]: {summary}")
parts.append("\n".join(lines))
# IM message notifications (Research Agent)
im_config = trace.context.get("im_config")
if im_config:
contact_id = im_config.get("contact_id")
chat_id = im_config.get("chat_id")
if contact_id and chat_id:
# Try to import the IM module and check for notifications
try:
from agent.tools.builtin.im import chat as im_chat
notification = im_chat._notifications.get((contact_id, chat_id))
if notification:
count = notification.get("count", 0)
senders = notification.get("from", [])
senders_str = ", ".join(senders)
parts.append(
f"## IM Notifications\n\n"
f"You have {count} new message(s) from: {senders_str}\n"
f"Use `im_receive_messages(contact_id=\"{contact_id}\", chat_id=\"{chat_id}\")` to read them."
)
else:
parts.append("## IM Notifications\n\nNo new messages")
except (ImportError, AttributeError):
# IM module not loaded or unavailable
pass
# Knowledge Manager queue status
km_queue_size = trace.context.get("km_queue_size")
if km_queue_size is not None:
current_sender = trace.context.get("current_sender", "unknown")
if km_queue_size > 0:
parts.append(
f"## Message Queue Status\n\n"
f"Currently handling: message from {current_sender}\n"
f"{km_queue_size} message(s) still queued"
)
else:
parts.append(
f"## Message Queue Status\n\n"
f"Currently handling: message from {current_sender}\n"
f"Queue is empty; going to sleep after this message"
)
return "\n\n".join(parts)
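# Example injected block (abridged; actual content depends on trace state):
#   ## Current Time
#   2025-01-01 12:00:00
#
#   ## Current Plan
#   ...goal tree rendering...
#
#   ## Active Collaborators
#   - researcher [agent, running]: crawling sources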
# ===== Misc helpers =====
async def _optimize_images(self, messages: List[Dict], model: str) -> List[Dict]:
"""
Tiered optimization of already-processed images to save tokens.
Strategy (based on how many assistant "turns" ago the image appeared, relative to the last assistant message):
1. Last 1-2 turns: keep the original image
2. 3-5 turns: downscale and compress (saves tokens while keeping visual information)
3. Older than 5 turns: call a small model to generate a text description + keep the URL
Results are cached to avoid repeated PIL decode/encode and LLM calls.
Args:
messages: the original message list
model: the model currently in use (used to pick the description model)
Returns:
the optimized message list (deep copy)
"""
if not messages:
return messages
# Find the position of the last assistant message
last_assistant_idx = -1
for i in range(len(messages) - 1, -1, -1):
if messages[i].get("role") == "assistant":
last_assistant_idx = i
break
# No assistant message yet means the conversation has not started; skip optimization
if last_assistant_idx == -1:
return messages
# Count the assistants between each position and the end (used as the "turn" distance)
assistant_count_after = [0] * len(messages)
count = 0
for i in range(len(messages) - 1, -1, -1):
assistant_count_after[i] = count
if messages[i].get("role") == "assistant":
count += 1
# Deep-copy to avoid mutating the caller's data
import copy
import hashlib
import asyncio
import base64 as b64mod
import httpx
import mimetypes
messages = copy.deepcopy(messages)
# Preprocessing: download every HTTP(S) image and convert it to a base64 data URL.
# The Qwen API cannot reach external signed URLs (e.g. BFL, Volcano Engine TOS), so conversion must happen locally
url_download_jobs = []  # [(msg_idx, block_idx, url)]
for i, msg in enumerate(messages):
if msg.get("role") != "tool":
continue
content = msg.get("content")
if not isinstance(content, list):
continue
for block_idx, block in enumerate(content):
if isinstance(block, dict) and block.get("type") == "image_url":
url = block.get("image_url", {}).get("url", "")
if url.startswith(("http://", "https://")):
url_download_jobs.append((i, block_idx, url))
if url_download_jobs:
async def _download_image_to_data_url(url: str) -> str | None:
try:
async with httpx.AsyncClient(timeout=60, trust_env=False) as client:
resp = await client.get(url)
resp.raise_for_status()
ct = resp.headers.get("content-type", "").split(";")[0].strip()
if not ct.startswith("image/"):
ct = mimetypes.guess_type(url.split("?")[0])[0] or "image/png"
b64 = b64mod.b64encode(resp.content).decode()
return f"data:{ct};base64,{b64}"
except Exception:
return None
results = await asyncio.gather(
*[_download_image_to_data_url(url) for _, _, url in url_download_jobs],
return_exceptions=True
)
converted = 0
for (msg_idx, block_idx, original_url), result in zip(url_download_jobs, results):
if isinstance(result, str) and result.startswith("data:"):
messages[msg_idx]["content"][block_idx]["image_url"]["url"] = result
converted += 1
if converted:
self.log.info(f"[Image Optimization] URL→base64 pre-conversion: {converted}/{len(url_download_jobs)} images")
# Optimization statistics
stats = {"kept": 0, "downscaled": 0, "described": 0, "cache_hit": 0}
# Collect images that need downscaling (processed concurrently below)
downscale_jobs = []  # [(msg_idx, block_idx, image_url, cache_key)]
# First pass: scan and collect the images to process
  2211. for i in range(last_assistant_idx):
  2212. msg = messages[i]
  2213. if msg.get("role") != "tool":
  2214. continue
  2215. content = msg.get("content")
  2216. if not isinstance(content, list):
  2217. continue
  2218. rounds_ago = assistant_count_after[i]
  2219. for block_idx, block in enumerate(content):
  2220. if isinstance(block, dict) and block.get("type") == "image_url":
  2221. image_url_obj = block.get("image_url", {})
  2222. image_url = image_url_obj.get("url", "")
  2223. if image_url.startswith("data:"):
  2224. cache_key = hashlib.md5(image_url[:200].encode()).hexdigest()
  2225. else:
  2226. cache_key = hashlib.md5(image_url.encode()).hexdigest()
# 3-5 turns old: needs downscaling
  2228. if 2 < rounds_ago <= 5:
  2229. cached = self._image_opt_cache.get(cache_key, {})
  2230. if "downscaled" not in cached and image_url.startswith("data:"):
  2231. downscale_jobs.append((i, block_idx, image_url, cache_key))
# Run all downscale jobs concurrently
if downscale_jobs:
  2234. downscale_results = await asyncio.gather(
  2235. *[self._downscale_image(url) for _, _, url, _ in downscale_jobs],
  2236. return_exceptions=True
  2237. )
  2238. for (_, _, _, cache_key), result in zip(downscale_jobs, downscale_results):
  2239. if not isinstance(result, Exception) and result is not None:
  2240. self._image_opt_cache.setdefault(cache_key, {})["downscaled"] = result
# Second pass: apply the processed results
for i in range(last_assistant_idx):
msg = messages[i]
if msg.get("role") != "tool":
continue
content = msg.get("content")
if not isinstance(content, list):
continue
# How many assistant "turns" ago this message was
rounds_ago = assistant_count_after[i]
# Process each content block
new_content = []
for block in content:
if isinstance(block, dict) and block.get("type") == "image_url":
image_url_obj = block.get("image_url", {})
image_url = image_url_obj.get("url", "")
# Build the cache key (URL images hash the URL itself; base64 images hash their first 200 characters)
if image_url.startswith("data:"):
cache_key = hashlib.md5(image_url[:200].encode()).hexdigest()
else:
cache_key = hashlib.md5(image_url.encode()).hexdigest()
# Pick the strategy by distance
if rounds_ago <= 2:
# Last 1-2 turns: keep the original image
new_content.append(block)
stats["kept"] += 1
elif rounds_ago <= 5:
# 3-5 turns: downscale (check the cache first)
cached = self._image_opt_cache.get(cache_key, {})
if "downscaled" in cached:
new_content.append({
"type": "image_url",
"image_url": {"url": cached["downscaled"]}
})
stats["downscaled"] += 1
stats["cache_hit"] += 1
elif image_url.startswith("data:"):
downscaled = await self._downscale_image(image_url)
if downscaled:
# Cache the result
self._image_opt_cache.setdefault(cache_key, {})["downscaled"] = downscaled
new_content.append({
"type": "image_url",
"image_url": {"url": downscaled}
})
stats["downscaled"] += 1
else:
new_content.append(block)
stats["kept"] += 1
else:
# URL image: cannot be processed directly, keep as-is
new_content.append(block)
stats["kept"] += 1
else:
# Older than 5 turns: generate a text description (check the cache first)
cached = self._image_opt_cache.get(cache_key, {})
if "description" in cached:
new_content.append(cached["description"])
stats["described"] += 1
stats["cache_hit"] += 1
else:
description = await self._generate_image_description(image_url, model)
url_info = f" (URL: {image_url[:100]}...)" if not image_url.startswith("data:") else ""
desc_block = {
"type": "text",
"text": f"[Image description: {description}]{url_info}"
}
# Cache the result
self._image_opt_cache.setdefault(cache_key, {})["description"] = desc_block
new_content.append(desc_block)
stats["described"] += 1
else:
new_content.append(block)
msg["content"] = new_content
if stats["downscaled"] > 0 or stats["described"] > 0:
self.log.info(
f"[Image Optimization] kept {stats['kept']}, "
f"downscaled {stats['downscaled']}, "
f"described {stats['described']}, "
f"cache hits {stats['cache_hit']}"
)
return messages
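# rounds_ago illustration: for roles [user, assistant, tool, tool, assistant, tool]
# assistant_count_after is [2, 1, 1, 1, 0, 0] — the two middle tool images are one
# assistant turn old (kept), and images older than five turns get text descriptions.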
async def _downscale_image(self, base64_url: str, max_size: int = 512) -> Optional[str]:
"""
Reduce the resolution of a base64-encoded image.
Args:
base64_url: a URL in data:image/xxx;base64,... form
max_size: maximum edge length in pixels
Returns:
the downscaled base64 URL, or None on failure
"""
try:
from PIL import Image
import io
import base64
# Parse the base64 payload
if not base64_url.startswith("data:"):
return None
header, data = base64_url.split(",", 1)
media_type = header.split(";")[0].split(":")[1]  # e.g. image/png
# Decode the image
img_data = base64.b64decode(data)
img = Image.open(io.BytesIO(img_data))
# Compute the new size (preserving aspect ratio)
width, height = img.size
if width <= max_size and height <= max_size:
return base64_url  # already small enough, no scaling needed
if width > height:
new_width = max_size
new_height = int(height * max_size / width)
else:
new_height = max_size
new_width = int(width * max_size / height)
# Resize (BILINEAR is faster than the default)
img_resized = img.resize((new_width, new_height), Image.Resampling.BILINEAR)
# Convert to RGB (JPEG does not support transparent/indexed modes such as RGBA or P)
if img_resized.mode != "RGB":
if img_resized.mode == "RGBA" or img_resized.mode == "P":
# Composite transparent images onto a white background
background = Image.new("RGB", img_resized.size, (255, 255, 255))
if img_resized.mode == "P" and "transparency" in img_resized.info:
img_resized = img_resized.convert("RGBA")
if img_resized.mode == "RGBA":
background.paste(img_resized, mask=img_resized.split()[3])
img_resized = background
img_resized = img_resized.convert("RGB")
# Re-encode as JPEG (lower quality for speed)
buffer = io.BytesIO()
img_resized.save(buffer, format="JPEG", quality=60, optimize=False)
new_data = base64.b64encode(buffer.getvalue()).decode("utf-8")
return f"data:image/jpeg;base64,{new_data}"
except Exception as e:
self.log.warning(f"[Image Downscale] Downscaling failed: {e}")
return None
async def _generate_image_description(self, image_url: str, current_model: str) -> str:
"""
Generate a text description of an image using a small model.
Args:
image_url: the image URL (base64 or http(s))
current_model: the model currently in use
Returns:
the image description text
"""
try:
# Use qwen-vl-max (Tongyi Qianwen vision model) for descriptions;
# the qwen-vl series specifically supports visual input
description_model = "qwen-vl-max"
# Build the description request
messages = [
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {"url": image_url}
},
{
"type": "text",
"text": "Describe the main content of this image concisely in 1-2 sentences."
}
]
}
]
# Call the LLM
result = await self.llm_call(
messages=messages,
model=description_model,
tools=None,
temperature=0.3,
)
description = result.get("content", "").strip()
return description if description else "image content"
except Exception as e:
self.log.warning(f"[Image Description] Failed to generate description: {e}")
return "image content"
def _add_cache_control(
self,
messages: List[Dict],
model: str,
enable: bool
) -> List[Dict]:
"""
Add Prompt Caching markers for models that support them.
Strategy: fixed positions + deferred caching
1. If there are unprocessed images (tool messages after the last assistant contain images), skip caching
2. Cache the system message (if it is long enough)
3. Fixed cache positions (20, 40, 60, 80), keeping each cache point >= 1024 tokens apart
4. Use at most 4 cache points (including system)
Args:
messages: the original message list
model: model name
enable: whether caching is enabled
Returns:
the message list with cache_control added (deep copy)
"""
if not enable:
return messages
# Only enable for Claude models
if "claude" not in model.lower():
return messages
# Deferred caching: check for unprocessed images
last_assistant_idx = -1
for i in range(len(messages) - 1, -1, -1):
if messages[i].get("role") == "assistant":
last_assistant_idx = i
break
# Check whether any tool message after the last assistant contains images
has_unprocessed_images = False
if last_assistant_idx >= 0:
for i in range(last_assistant_idx + 1, len(messages)):
msg = messages[i]
if msg.get("role") == "tool":
content = msg.get("content")
if isinstance(content, list):
has_unprocessed_images = any(
isinstance(block, dict) and block.get("type") == "image_url"
for block in content
)
if has_unprocessed_images:
break
if has_unprocessed_images:
self.log.debug("[Cache] Unprocessed images detected; deferring cache establishment")
return messages
# Deep-copy to avoid mutating the caller's data
import copy
messages = copy.deepcopy(messages)
# Strategy 1: cache the system message
system_cached = False
for msg in messages:
if msg.get("role") == "system":
content = msg.get("content", "")
if isinstance(content, str) and len(content) > 1000:
msg["content"] = [{
"type": "text",
"text": content,
"cache_control": {"type": "ephemeral"}
}]
system_cached = True
self.log.debug(f"[Cache] Added cache marker to system message (len={len(content)})")
break
# Strategy 2: fixed-position cache points
CACHE_INTERVAL = 20
MAX_POINTS = 3 if system_cached else 4
MIN_TOKENS = 1024
AVG_TOKENS_PER_MSG = 70
total_msgs = len(messages)
if total_msgs == 0:
return messages
cache_positions = []
last_cache_pos = 0
for i in range(1, MAX_POINTS + 1):
target_pos = i * CACHE_INTERVAL - 1  # 19, 39, 59, 79
if target_pos >= total_msgs:
break
# Scan forward from the target position for a suitable user/assistant message
for j in range(target_pos, total_msgs):
msg = messages[j]
if msg.get("role") not in ("user", "assistant"):
continue
content = msg.get("content", "")
if not content:
continue
# Check that the content is non-empty
is_valid = False
if isinstance(content, str):
is_valid = len(content) > 0
elif isinstance(content, list):
is_valid = any(
isinstance(block, dict) and
block.get("type") == "text" and
len(block.get("text", "")) > 0
for block in content
)
if not is_valid:
continue
# Check the token distance
msg_count = j - last_cache_pos
estimated_tokens = msg_count * AVG_TOKENS_PER_MSG
if estimated_tokens >= MIN_TOKENS:
cache_positions.append(j)
last_cache_pos = j
self.log.debug(f"[Cache] Added cache point at position {j} (estimated {estimated_tokens} tokens)")
break
# Apply the cache markers
for idx in cache_positions:
msg = messages[idx]
content = msg.get("content", "")
if isinstance(content, str):
msg["content"] = [{
"type": "text",
"text": content,
"cache_control": {"type": "ephemeral"}
}]
self.log.debug(f"[Cache] Added cache marker to message[{idx}] ({msg.get('role')})")
elif isinstance(content, list):
# Attach cache_control to the last text block
for block in reversed(content):
if isinstance(block, dict) and block.get("type") == "text":
block["cache_control"] = {"type": "ephemeral"}
self.log.debug(f"[Cache] Added cache marker to message[{idx}] ({msg.get('role')})")
break
self.log.debug(
f"[Cache] total messages: {total_msgs}, "
f"cache points: {len(cache_positions)} at {cache_positions}"
)
return messages
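# Cache-point layout illustration (using the defaults above): with 85 messages and
# a cached system prompt, candidate anchors start at indices 19, 39, 59 and slide
# forward to the next non-empty user/assistant message at least ~1024 estimated
# tokens (≈15 messages at 70 tokens each) past the previous cache point.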
def _get_tool_schemas(
self,
tools: Optional[List[str]] = None,
tool_groups: Optional[List[str]] = None,
exclude_tools: Optional[List[str]] = None,
) -> List[Dict]:
"""
Fetch tool schemas.
Merge strategy (union):
- tool_groups non-empty: filter by the group whitelist to get the base tool set
- tools non-empty: add the named tools (union with the tool_groups result)
- both None: return all registered tools
Finally subtract exclude_tools (e.g. remote agents are forbidden agent/evaluate).
"""
if tool_groups is not None:
tool_names = set(self.tools.get_tool_names(groups=tool_groups))
else:
tool_names = set(self.tools.get_tool_names())
if tools is not None:
tool_names |= set(tools)
if exclude_tools:
tool_names -= set(exclude_tools)
return self.tools.get_schemas(list(tool_names))
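# Example (hypothetical names): tool_groups=["fs"], tools=["im_send"],
# exclude_tools=["agent"] resolves to (fs-group tools ∪ {im_send}) − {agent}.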
# Default system prompt prefix (used when neither config.system_prompt nor the frontend provides a system message)
# Note: the constant itself has moved to agent.core.prompts; the reference is kept here for backward compatibility
async def _build_system_prompt(self, config: RunConfig, base_prompt: Optional[str] = None) -> Optional[str]:
"""Build the system prompt (with skills injected).
Priority:
1. base_prompt (from the messages)
2. config.system_prompt (explicitly set)
3. preset.system_prompt (a preset's full system prompt)
4. default template + skills
Skill injection priority:
1. config.skills explicitly set → filter by name
2. config.skills is None → use the preset's default skills list
3. preset also has no skills (None) → load everything (backward compatible)
Args:
base_prompt: existing system content (from the messages);
when None, config.system_prompt or preset.system_prompt is used
"""
from agent.core.presets import AGENT_PRESETS
# Determine the system_prompt source
if base_prompt is not None:
system_prompt = base_prompt
elif config.system_prompt is not None:
system_prompt = config.system_prompt
else:
# Try to get the system_prompt from the preset
preset = AGENT_PRESETS.get(config.agent_type)
system_prompt = preset.system_prompt if preset and preset.system_prompt else None
# Determine which skills to load
skills_filter: Optional[List[str]] = config.skills
if skills_filter is None:
preset = AGENT_PRESETS.get(config.agent_type)
if preset is not None:
skills_filter = preset.skills  # may still be None (load all)
# Load and filter
all_skills = load_skills_from_dir(self.skills_dir)
if skills_filter is not None:
skills = [s for s in all_skills if s.name in skills_filter]
else:
skills = all_skills
skills_text = self._format_skills(skills) if skills else ""
if system_prompt:
if skills_text:
system_prompt += f"\n\n## Skills\n{skills_text}"
else:
system_prompt = DEFAULT_SYSTEM_PREFIX
if skills_text:
system_prompt += f"\n\n## Skills\n{skills_text}"
if config.max_iterations and config.max_iterations > 0:
system_prompt += f"\n\n## Execution Constraint\nThis task has a strict step budget: you may use at most {config.max_iterations} interaction rounds to solve the problem.\nWork incrementally and save as you go! Whenever you gather or derive a valuable standalone result (e.g. a single collected case), immediately call a tool to write or append it to the results file; never wait until everything is done to output it all at once. That way, even if you are force-stopped at the step limit, everything collected so far is safely preserved!"
# Memory injection (memory-bearing agent) — appended at the end of the system prompt.
# The first version appends to the system prompt (see agent/docs/memory.md, open question 1).
# Upside: injected once at run start, visible to all later turns, consistent with skill injection.
# Cost: a large memory file keeps consuming prompt tokens — revisit once observed in practice.
if config.memory:
try:
from agent.core.memory import load_memory_files, format_memory_injection
files = load_memory_files(config.memory)
memory_text = format_memory_injection(files)
if memory_text:
system_prompt += f"\n\n{memory_text}"
except Exception as e:
self.log.warning(f"[Memory] Failed to load memory; skipping injection: {e}")
return system_prompt
async def _generate_task_name(self, messages: List[Dict]) -> str:
"""Generate a task name: prefer utility_llm, falling back to truncated text"""
# Extract the text content from messages
text_parts = []
for msg in messages:
content = msg.get("content", "")
if isinstance(content, str):
text_parts.append(content)
elif isinstance(content, list):
for part in content:
if isinstance(part, dict) and part.get("type") == "text":
text_parts.append(part.get("text", ""))
raw_text = " ".join(text_parts).strip()
if not raw_text:
return TASK_NAME_FALLBACK
# Try utility_llm for title generation
if self.utility_llm_call:
try:
result = await self.utility_llm_call(
messages=[
{"role": "system", "content": TASK_NAME_GENERATION_SYSTEM_PROMPT},
{"role": "user", "content": raw_text[:2000]},
],
model="gpt-4o-mini",  # cheap model
)
title = result.get("content", "").strip()
if title and len(title) < 100:
return title
except Exception:
pass
# Fallback: first 50 characters
return raw_text[:50] + ("..." if len(raw_text) > 50 else "")
def _format_skills(self, skills: List[Skill]) -> str:
if not skills:
return ""
return "\n\n".join(s.to_prompt_text() for s in skills)