compaction.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352
"""
Context compaction — two-level compression strategy.

Level 1: goal-completion compression (deterministic, zero LLM cost)
- For completed/abandoned goals: keep goal-tool messages, drop non-goal tool messages
- Three modes: none / on_complete / on_overflow

Level 2: LLM summarization (triggered only if still over the limit after Level 1)
- Compresses via a side-branch, multi-turn agent pass
- Afterwards the history is rebuilt as: system prompt + first user message + summary

Compression never modifies storage: the original messages are kept forever
under messages/; this is a pure in-memory operation.
"""
  11. import copy
  12. import json
  13. import logging
  14. from dataclasses import dataclass
  15. from typing import List, Dict, Any, Optional, Set
  16. from .goal_models import GoalTree
  17. from .models import Message
  18. from agent.core.prompts import (
  19. REFLECT_PROMPT,
  20. build_compression_eval_prompt,
  21. )
  22. logger = logging.getLogger(__name__)
  23. # ===== 模型 Context Window(tokens)=====
  24. MODEL_CONTEXT_WINDOWS: Dict[str, int] = {
  25. # Anthropic Claude
  26. "claude-sonnet-4": 200_000,
  27. "claude-opus-4": 200_000,
  28. "claude-3-5-sonnet": 200_000,
  29. "claude-3-5-haiku": 200_000,
  30. "claude-3-opus": 200_000,
  31. "claude-3-sonnet": 200_000,
  32. "claude-3-haiku": 200_000,
  33. # OpenAI
  34. "gpt-4o": 128_000,
  35. "gpt-4o-mini": 128_000,
  36. "gpt-4-turbo": 128_000,
  37. "gpt-4": 8_192,
  38. "o1": 200_000,
  39. "o3-mini": 200_000,
  40. # Google Gemini
  41. "gemini-2.5-pro": 1_000_000,
  42. "gemini-2.5-flash": 1_000_000,
  43. "gemini-2.0-flash": 1_000_000,
  44. "gemini-1.5-pro": 2_000_000,
  45. "gemini-1.5-flash": 1_000_000,
  46. # DeepSeek
  47. "deepseek-chat": 64_000,
  48. "deepseek-r1": 64_000,
  49. }
  50. DEFAULT_CONTEXT_WINDOW = 200_000
  51. def get_context_window(model: str) -> int:
  52. """
  53. 根据模型名称获取 context window 大小。
  54. 支持带 provider 前缀的模型名(如 "anthropic/claude-sonnet-4.5")和
  55. 带版本后缀的名称(如 "claude-3-5-sonnet-20241022")。
  56. """
  57. # 去掉 provider 前缀
  58. name = model.split("/")[-1].lower()
  59. # 精确匹配
  60. if name in MODEL_CONTEXT_WINDOWS:
  61. return MODEL_CONTEXT_WINDOWS[name]
  62. # 前缀匹配(处理版本后缀)
  63. for key, window in MODEL_CONTEXT_WINDOWS.items():
  64. if name.startswith(key):
  65. return window
  66. return DEFAULT_CONTEXT_WINDOW
  67. # ===== 配置 =====
  68. @dataclass
  69. class CompressionConfig:
  70. """压缩配置"""
  71. max_tokens: int = 0 # 最大 token 数(0 = 自动:context_window * 0.5)
  72. threshold_ratio: float = 0.5 # 触发压缩的阈值 = context_window 的比例
  73. keep_recent_messages: int = 10 # Level 1 中始终保留最近 N 条消息
  74. max_messages: int = 50 # 最大消息数(超过此数量触发压缩,0 = 禁用)
  75. def get_max_tokens(self, model: str) -> int:
  76. """获取实际的 max_tokens(如果为 0 则自动计算)"""
  77. if self.max_tokens > 0:
  78. return self.max_tokens
  79. window = get_context_window(model)
  80. return int(window * self.threshold_ratio)
  81. # ===== Level 1: Goal 完成压缩 =====
  82. def compress_completed_goals(
  83. messages: List[Message],
  84. goal_tree: Optional[GoalTree],
  85. ) -> List[Message]:
  86. """
  87. Level 1 压缩:移除 completed/abandoned goals 的非 goal 工具消息
  88. 对每个 completed/abandoned goal:
  89. - 保留:所有调用 goal 工具的 assistant 消息及其 tool result
  90. - 移除:所有非 goal 工具的 assistant 消息及其 tool result
  91. - 替换:goal(done=...) 的 tool result 内容为 "具体执行过程已清理"
  92. - goal_id 为 None 的消息始终保留(system prompt、初始 user message)
  93. - pending / in_progress goals 的消息不受影响
  94. 纯内存操作,不修改原始 Message 对象,不涉及持久化。
  95. Args:
  96. messages: 主路径上的有序消息列表(Message 对象)
  97. goal_tree: GoalTree 实例
  98. Returns:
  99. 压缩后的消息列表
  100. """
  101. if not goal_tree or not goal_tree.goals:
  102. return messages
  103. # 收集 completed/abandoned goal IDs
  104. completed_ids: Set[str] = {
  105. g.id for g in goal_tree.goals
  106. if g.status in ("completed", "abandoned")
  107. }
  108. if not completed_ids:
  109. return messages
  110. # Pass 1: 扫描 assistant 消息,分类 tool_call_ids
  111. remove_seqs: Set[int] = set() # 要移除的 assistant 消息 sequence
  112. remove_tc_ids: Set[str] = set() # 要移除的 tool result 的 tool_call_id
  113. done_tc_ids: Set[str] = set() # goal(done=...) 的 tool_call_id(替换 tool result)
  114. for msg in messages:
  115. if msg.goal_id not in completed_ids:
  116. continue
  117. if msg.role != "assistant":
  118. continue
  119. content = msg.content
  120. tc_list = []
  121. if isinstance(content, dict):
  122. tc_list = content.get("tool_calls", [])
  123. if not tc_list:
  124. # 纯文本 assistant 消息(无工具调用),移除
  125. remove_seqs.add(msg.sequence)
  126. continue
  127. # 检查是否包含 goal 工具调用
  128. has_goal_call = False
  129. for tc in tc_list:
  130. func_name = tc.get("function", {}).get("name", "")
  131. if func_name == "goal":
  132. has_goal_call = True
  133. # 检查是否为 done 调用
  134. args_str = tc.get("function", {}).get("arguments", "{}")
  135. try:
  136. args = json.loads(args_str) if isinstance(args_str, str) else (args_str or {})
  137. except json.JSONDecodeError:
  138. args = {}
  139. if args.get("done") is not None:
  140. tc_id = tc.get("id")
  141. if tc_id:
  142. done_tc_ids.add(tc_id)
  143. if not has_goal_call:
  144. # 不含 goal 工具调用 → 移除整条 assistant 及其所有 tool results
  145. remove_seqs.add(msg.sequence)
  146. for tc in tc_list:
  147. tc_id = tc.get("id")
  148. if tc_id:
  149. remove_tc_ids.add(tc_id)
  150. # 无需压缩
  151. if not remove_seqs and not done_tc_ids:
  152. return messages
  153. # Pass 2: 构建结果
  154. result: List[Message] = []
  155. for msg in messages:
  156. # 跳过标记移除的 assistant 消息
  157. if msg.sequence in remove_seqs:
  158. continue
  159. # 跳过标记移除的 tool result
  160. if msg.role == "tool" and msg.tool_call_id in remove_tc_ids:
  161. continue
  162. # 替换 done 的 tool result 内容
  163. if msg.role == "tool" and msg.tool_call_id in done_tc_ids:
  164. modified = copy.copy(msg)
  165. modified.content = {"tool_name": "goal", "result": "具体执行过程已清理"}
  166. result.append(modified)
  167. continue
  168. result.append(msg)
  169. return result
  170. # ===== Token 估算 =====
  171. def estimate_tokens(messages: List[Dict[str, Any]]) -> int:
  172. """
  173. 估算消息列表的 token 数量
  174. 对 CJK 字符和 ASCII 字符使用不同的估算系数:
  175. - ASCII/Latin 字符:~4 字符 ≈ 1 token
  176. - CJK 字符(中日韩):~1 字符 ≈ 1.5 tokens(BPE tokenizer 特性)
  177. """
  178. total_tokens = 0
  179. for msg in messages:
  180. content = msg.get("content", "")
  181. if isinstance(content, str):
  182. total_tokens += _estimate_text_tokens(content)
  183. elif isinstance(content, list):
  184. for part in content:
  185. if isinstance(part, dict):
  186. if part.get("type") == "text":
  187. total_tokens += _estimate_text_tokens(part.get("text", ""))
  188. elif part.get("type") in ("image_url", "image"):
  189. total_tokens += _estimate_image_tokens(part)
  190. # tool_calls
  191. tool_calls = msg.get("tool_calls")
  192. if tool_calls and isinstance(tool_calls, list):
  193. for tc in tool_calls:
  194. if isinstance(tc, dict):
  195. func = tc.get("function", {})
  196. total_tokens += len(func.get("name", "")) // 4
  197. args = func.get("arguments", "")
  198. if isinstance(args, str):
  199. total_tokens += _estimate_text_tokens(args)
  200. return total_tokens
  201. def _estimate_text_tokens(text: str) -> int:
  202. """
  203. 估算文本的 token 数,区分 CJK 和 ASCII 字符。
  204. CJK 字符在 BPE tokenizer 中通常占 1.5-2 tokens,
  205. ASCII 字符约 4 个对应 1 token。
  206. """
  207. if not text:
  208. return 0
  209. cjk_chars = 0
  210. other_chars = 0
  211. for ch in text:
  212. if _is_cjk(ch):
  213. cjk_chars += 1
  214. else:
  215. other_chars += 1
  216. # CJK: 1 char ≈ 1.5 tokens; ASCII: 4 chars ≈ 1 token
  217. return int(cjk_chars * 1.5) + other_chars // 4
  218. def _estimate_image_tokens(block: Dict[str, Any]) -> int:
  219. """
  220. 估算图片块的 token 消耗。
  221. Anthropic 计算方式:tokens = (width * height) / 750
  222. 优先从 _image_meta 读取真实尺寸,其次从 base64 数据量粗估,最小 1600 tokens。
  223. """
  224. MIN_IMAGE_TOKENS = 1600
  225. # 优先使用 _image_meta 中的真实尺寸
  226. meta = block.get("_image_meta")
  227. if meta and meta.get("width") and meta.get("height"):
  228. tokens = (meta["width"] * meta["height"]) // 750
  229. return max(MIN_IMAGE_TOKENS, tokens)
  230. # 回退:从 base64 数据长度粗估
  231. b64_data = ""
  232. if block.get("type") == "image":
  233. source = block.get("source", {})
  234. if source.get("type") == "base64":
  235. b64_data = source.get("data", "")
  236. elif block.get("type") == "image_url":
  237. url_obj = block.get("image_url", {})
  238. url = url_obj.get("url", "") if isinstance(url_obj, dict) else str(url_obj)
  239. if url.startswith("data:"):
  240. _, _, b64_data = url.partition(",")
  241. if b64_data:
  242. # base64 编码后大小约为原始字节的 4/3
  243. raw_bytes = len(b64_data) * 3 // 4
  244. # 粗估:假设 JPEG 压缩率 ~10:1,像素数 ≈ raw_bytes * 10 / 3 (RGB)
  245. estimated_pixels = raw_bytes * 10 // 3
  246. estimated_tokens = estimated_pixels // 750
  247. return max(MIN_IMAGE_TOKENS, estimated_tokens)
  248. return MIN_IMAGE_TOKENS
  249. def _is_cjk(ch: str) -> bool:
  250. """判断字符是否为 CJK(中日韩)字符"""
  251. cp = ord(ch)
  252. return (
  253. 0x2E80 <= cp <= 0x9FFF # CJK 基本区 + 部首 + 笔画 + 兼容
  254. or 0xF900 <= cp <= 0xFAFF # CJK 兼容表意文字
  255. or 0xFE30 <= cp <= 0xFE4F # CJK 兼容形式
  256. or 0x20000 <= cp <= 0x2FA1F # CJK 扩展 B-F + 兼容补充
  257. or 0x3000 <= cp <= 0x303F # CJK 标点符号
  258. or 0xFF00 <= cp <= 0xFFEF # 全角字符
  259. )
  260. def estimate_tokens_from_messages(messages: List[Message]) -> int:
  261. """从 Message 对象列表估算 token 数"""
  262. return estimate_tokens([msg.to_llm_dict() for msg in messages])
  263. def needs_level2_compression(
  264. token_count: int,
  265. config: CompressionConfig,
  266. model: str = "",
  267. ) -> bool:
  268. """判断是否需要触发 Level 2 压缩"""
  269. limit = config.get_max_tokens(model) if model else config.max_tokens
  270. return token_count > limit
  271. # ===== Level 2: 压缩 Prompt =====
  272. # 注意:这些 prompt 已迁移到 agent.core.prompts
  273. # COMPRESSION_EVAL_PROMPT 和 REFLECT_PROMPT 现在从 prompts.py 导入
  274. def build_compression_prompt(goal_tree: Optional[GoalTree]) -> str:
  275. """构建 Level 2 压缩 prompt"""
  276. goal_prompt = ""
  277. if goal_tree:
  278. goal_prompt = goal_tree.to_prompt(include_summary=True)
  279. return build_compression_eval_prompt(
  280. goal_tree_prompt=goal_prompt,
  281. )
def build_reflect_prompt() -> str:
    """Build the reflection prompt (static; imported from agent.core.prompts)."""
    return REFLECT_PROMPT