models.py 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. """
  2. Trace 和 Message 数据模型
  3. Trace: 一次完整的 LLM 交互(单次调用或 Agent 任务)
  4. Message: Trace 中的 LLM 消息,对应 LLM API 格式
  5. """
  6. from dataclasses import dataclass, field
  7. from datetime import datetime
  8. from typing import Dict, Any, List, Optional, Literal, Union
  9. import uuid
# ===== Wire-format type aliases for messages =====
# Lightweight wire-format types used for tool parameters and the
# runner / LLM API boundary. Internal storage uses the Message
# dataclass defined below.
ChatMessage = Dict[str, Any]  # a single OpenAI-format message
Messages = List[ChatMessage]  # a list of messages
MessageContent = Union[str, List[Dict[str, str]]]  # the "content" field (plain text or multimodal parts)
# TokenUsage is resolved lazily to avoid a circular import.
def _get_token_usage_class():
    """Return the TokenUsage class (deferred import breaks an import cycle)."""
    from ..llm.usage import TokenUsage
    return TokenUsage
  20. @dataclass
  21. class Trace:
  22. """
  23. 执行轨迹 - 一次完整的 LLM 交互
  24. 单次调用: mode="call"
  25. Agent 模式: mode="agent"
  26. 主 Trace 和 Sub-Trace 使用相同的数据结构。
  27. Sub-Trace 通过 parent_trace_id 和 parent_goal_id 关联父 Trace。
  28. """
  29. trace_id: str
  30. mode: Literal["call", "agent"]
  31. # Prompt 标识(可选)
  32. prompt_name: Optional[str] = None
  33. # Agent 模式特有
  34. task: Optional[str] = None
  35. agent_type: Optional[str] = None
  36. # 父子关系(Sub-Trace 特有)
  37. parent_trace_id: Optional[str] = None # 父 Trace ID
  38. parent_goal_id: Optional[str] = None # 哪个 Goal 启动的
  39. # 状态
  40. status: Literal["running", "completed", "failed", "stopped"] = "running"
  41. # 统计
  42. total_messages: int = 0 # 消息总数(改名自 total_steps)
  43. total_tokens: int = 0 # 总 tokens(向后兼容,= prompt + completion)
  44. total_prompt_tokens: int = 0 # 总输入 tokens
  45. total_completion_tokens: int = 0 # 总输出 tokens
  46. total_reasoning_tokens: int = 0 # 总推理 tokens(o1/o3, DeepSeek R1, Gemini thinking)
  47. total_cache_creation_tokens: int = 0 # 总缓存创建 tokens(Claude)
  48. total_cache_read_tokens: int = 0 # 总缓存读取 tokens(Claude)
  49. total_cost: float = 0.0
  50. total_duration_ms: int = 0 # 总耗时(毫秒)
  51. # 进度追踪(head)
  52. last_sequence: int = 0 # 最新 message 的 sequence
  53. head_sequence: int = 0 # 当前主路径的头节点 sequence(用于 build_llm_messages)
  54. last_event_id: int = 0 # 最新事件 ID(用于 WS 续传)
  55. # 配置
  56. uid: Optional[str] = None
  57. model: Optional[str] = None # 默认模型
  58. tools: Optional[List[Dict]] = None # 工具定义(整个 trace 共享)
  59. llm_params: Dict[str, Any] = field(default_factory=dict) # LLM 参数(temperature 等)
  60. context: Dict[str, Any] = field(default_factory=dict) # 其他元数据
  61. # 当前焦点 goal
  62. current_goal_id: Optional[str] = None
  63. # 结果
  64. result_summary: Optional[str] = None # 执行结果摘要
  65. error_message: Optional[str] = None # 错误信息
  66. # 时间
  67. created_at: datetime = field(default_factory=datetime.now)
  68. completed_at: Optional[datetime] = None
  69. @classmethod
  70. def create(
  71. cls,
  72. mode: Literal["call", "agent"],
  73. **kwargs
  74. ) -> "Trace":
  75. """创建新的 Trace"""
  76. return cls(
  77. trace_id=str(uuid.uuid4()),
  78. mode=mode,
  79. **kwargs
  80. )
  81. def to_dict(self) -> Dict[str, Any]:
  82. """转换为字典"""
  83. return {
  84. "trace_id": self.trace_id,
  85. "mode": self.mode,
  86. "prompt_name": self.prompt_name,
  87. "task": self.task,
  88. "agent_type": self.agent_type,
  89. "parent_trace_id": self.parent_trace_id,
  90. "parent_goal_id": self.parent_goal_id,
  91. "status": self.status,
  92. "total_messages": self.total_messages,
  93. "total_tokens": self.total_tokens,
  94. "total_prompt_tokens": self.total_prompt_tokens,
  95. "total_completion_tokens": self.total_completion_tokens,
  96. "total_reasoning_tokens": self.total_reasoning_tokens,
  97. "total_cache_creation_tokens": self.total_cache_creation_tokens,
  98. "total_cache_read_tokens": self.total_cache_read_tokens,
  99. "total_cost": self.total_cost,
  100. "total_duration_ms": self.total_duration_ms,
  101. "last_sequence": self.last_sequence,
  102. "head_sequence": self.head_sequence,
  103. "last_event_id": self.last_event_id,
  104. "uid": self.uid,
  105. "model": self.model,
  106. "tools": self.tools,
  107. "llm_params": self.llm_params,
  108. "context": self.context,
  109. "current_goal_id": self.current_goal_id,
  110. "result_summary": self.result_summary,
  111. "error_message": self.error_message,
  112. "created_at": self.created_at.isoformat() if self.created_at else None,
  113. "completed_at": self.completed_at.isoformat() if self.completed_at else None,
  114. }
@dataclass
class Message:
    """
    Execution message - one LLM message inside a Trace.

    Mirrors the LLM API message format (system/user/assistant/tool) and is
    associated with a Goal through goal_id.

    The description field is auto-generated with these rules:
    - system: first 200 chars of content
    - user: first 200 chars of content
    - assistant: prefer content text; if absent, "tool call: XX, XX"
    - tool: the tool name
    """
    message_id: str
    trace_id: str
    role: Literal["system", "user", "assistant", "tool"]  # same vocabulary as the LLM API
    sequence: int  # global ordering within the trace
    parent_sequence: Optional[int] = None  # sequence of the parent message (forms a message tree)
    status: Literal["active", "abandoned"] = "active"  # [deprecated] superseded by the parent_sequence tree
    goal_id: Optional[str] = None  # internal Goal id (None = no Goal created yet)
    description: str = ""  # human-readable summary (auto-generated)
    tool_call_id: Optional[str] = None  # links a tool message to its originating tool_call
    content: Any = None  # message content (same shape as the LLM API)
    # Metadata
    prompt_tokens: Optional[int] = None  # input tokens
    completion_tokens: Optional[int] = None  # output tokens
    reasoning_tokens: Optional[int] = None  # reasoning tokens (o1/o3, DeepSeek R1, Gemini thinking)
    cache_creation_tokens: Optional[int] = None  # cache-write tokens (Claude)
    cache_read_tokens: Optional[int] = None  # cache-read tokens (Claude)
    cost: Optional[float] = None
    duration_ms: Optional[int] = None
    created_at: datetime = field(default_factory=datetime.now)
    abandoned_at: Optional[datetime] = None  # [deprecated] superseded by the parent_sequence tree
    # LLM response info (only meaningful when role="assistant")
    finish_reason: Optional[str] = None  # stop, length, tool_calls, content_filter, ...

    @property
    def tokens(self) -> int:
        """Total tokens, computed on the fly (input + output; backward compat)."""
        return (self.prompt_tokens or 0) + (self.completion_tokens or 0)

    @property
    def all_tokens(self) -> int:
        """All tokens, including reasoning tokens."""
        return self.tokens + (self.reasoning_tokens or 0)

    def get_usage(self):
        """Build a TokenUsage object from this message's token counters."""
        TokenUsage = _get_token_usage_class()
        return TokenUsage(
            input_tokens=self.prompt_tokens or 0,
            output_tokens=self.completion_tokens or 0,
            reasoning_tokens=self.reasoning_tokens or 0,
            cache_creation_tokens=self.cache_creation_tokens or 0,
            cache_read_tokens=self.cache_read_tokens or 0,
        )

    def to_llm_dict(self) -> Dict[str, Any]:
        """Convert to an OpenAI-SDK-style message dict (for LLM calls)."""
        msg: Dict[str, Any] = {"role": self.role}
        if self.role == "tool":
            # tool message: tool_call_id + name + content (string)
            if self.tool_call_id:
                msg["tool_call_id"] = self.tool_call_id
            # "name" comes from description, which holds the tool name for
            # tool messages built by Message.create with a dict content.
            msg["name"] = self.description or "unknown"
            if isinstance(self.content, dict):
                msg["content"] = str(self.content.get("result", self.content))
            else:
                msg["content"] = str(self.content) if self.content is not None else ""
        elif self.role == "assistant":
            # assistant message: content (text) + tool_calls
            if isinstance(self.content, dict):
                msg["content"] = self.content.get("text", "") or ""
                if self.content.get("tool_calls"):
                    msg["tool_calls"] = self.content["tool_calls"]
            elif isinstance(self.content, str):
                msg["content"] = self.content
            else:
                msg["content"] = ""
        else:
            # system / user message: pass content through unchanged
            msg["content"] = self.content
        return msg

    @classmethod
    def from_llm_dict(
        cls,
        d: Dict[str, Any],
        trace_id: str,
        sequence: int,
        goal_id: Optional[str] = None,
        parent_sequence: Optional[int] = None,
    ) -> "Message":
        """Create a Message from an OpenAI-SDK-format message dict."""
        role = d["role"]
        if role == "assistant":
            content = {"text": d.get("content", ""), "tool_calls": d.get("tool_calls")}
        elif role == "tool":
            content = {"tool_name": d.get("name", "unknown"), "result": d.get("content", "")}
        else:
            content = d.get("content", "")
        return cls.create(
            trace_id=trace_id,
            role=role,
            sequence=sequence,
            goal_id=goal_id,
            parent_sequence=parent_sequence,
            content=content,
            tool_call_id=d.get("tool_call_id"),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Message":
        """Create a Message from a stored dict (handles backward compatibility)."""
        # Drop fields that no longer exist on the dataclass.
        filtered_data = {k: v for k, v in data.items() if k not in ["tokens", "available_tools"]}
        # Parse datetimes that were serialized as ISO strings.
        if filtered_data.get("created_at") and isinstance(filtered_data["created_at"], str):
            filtered_data["created_at"] = datetime.fromisoformat(filtered_data["created_at"])
        if filtered_data.get("abandoned_at") and isinstance(filtered_data["abandoned_at"], str):
            filtered_data["abandoned_at"] = datetime.fromisoformat(filtered_data["abandoned_at"])
        # Backward compat: old messages had no status field; default to active.
        if "status" not in filtered_data:
            filtered_data["status"] = "active"
        # Backward compat: old messages had no parent_sequence field.
        if "parent_sequence" not in filtered_data:
            filtered_data["parent_sequence"] = None
        return cls(**filtered_data)

    @classmethod
    def create(
        cls,
        trace_id: str,
        role: Literal["system", "user", "assistant", "tool"],
        sequence: int,
        goal_id: Optional[str] = None,
        content: Any = None,
        tool_call_id: Optional[str] = None,
        parent_sequence: Optional[int] = None,
        prompt_tokens: Optional[int] = None,
        completion_tokens: Optional[int] = None,
        reasoning_tokens: Optional[int] = None,
        cache_creation_tokens: Optional[int] = None,
        cache_read_tokens: Optional[int] = None,
        cost: Optional[float] = None,
        duration_ms: Optional[int] = None,
        finish_reason: Optional[str] = None,
    ) -> "Message":
        """Create a new Message, auto-generating its description."""
        description = cls._generate_description(role, content)
        return cls(
            # message_id is deterministic: "<trace_id>-<zero-padded sequence>"
            message_id=f"{trace_id}-{sequence:04d}",
            trace_id=trace_id,
            role=role,
            sequence=sequence,
            parent_sequence=parent_sequence,
            goal_id=goal_id,
            content=content,
            description=description,
            tool_call_id=tool_call_id,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            reasoning_tokens=reasoning_tokens,
            cache_creation_tokens=cache_creation_tokens,
            cache_read_tokens=cache_read_tokens,
            cost=cost,
            duration_ms=duration_ms,
            finish_reason=finish_reason,
        )

    @staticmethod
    def _generate_description(role: str, content: Any) -> str:
        """
        Auto-generate a description for a message.

        - system: first 200 chars of content
        - user: first 200 chars of content
        - assistant: prefer content text; if absent, "tool call: XX, XX"
        - tool: the tool name
        """
        if role == "system":
            # system message: truncate the raw text
            if isinstance(content, str):
                # ternary binds last, so the "..." suffix only applies when truncated
                return content[:200] + "..." if len(content) > 200 else content
            return "system prompt"
        elif role == "user":
            # user message: truncate the raw text
            if isinstance(content, str):
                return content[:200] + "..." if len(content) > 200 else content
            return "user message"
        elif role == "assistant":
            # assistant message: content is a dict that may hold text and tool_calls
            if isinstance(content, dict):
                # Prefer the text content.
                if content.get("text"):
                    text = content["text"]
                    # Truncate overly long text.
                    return text[:200] + "..." if len(text) > 200 else text
                # No text: fall back to the tool_calls.
                if content.get("tool_calls"):
                    tool_calls = content["tool_calls"]
                    if isinstance(tool_calls, list):
                        tool_names = []
                        for tc in tool_calls:
                            if isinstance(tc, dict) and tc.get("function", {}).get("name"):
                                tool_names.append(tc["function"]["name"])
                        if tool_names:
                            return f"tool call: {', '.join(tool_names)}"
            # content may also be a plain string
            if isinstance(content, str):
                return content[:200] + "..." if len(content) > 200 else content
            return "assistant message"
        elif role == "tool":
            # tool message: pull the tool name out of the content dict
            if isinstance(content, dict):
                if content.get("tool_name"):
                    return content["tool_name"]
            # If content is a string, truncate it.
            if isinstance(content, str):
                return content[:100] + "..." if len(content) > 100 else content
            return "tool result"
        return ""

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict (datetimes become ISO strings)."""
        result = {
            "message_id": self.message_id,
            "trace_id": self.trace_id,
            "role": self.role,
            "sequence": self.sequence,
            "parent_sequence": self.parent_sequence,
            "status": self.status,
            "goal_id": self.goal_id,
            "tool_call_id": self.tool_call_id,
            "content": self.content,
            "description": self.description,
            "tokens": self.tokens,  # computed via the @property
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "cost": self.cost,
            "duration_ms": self.duration_ms,
            "finish_reason": self.finish_reason,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }
        # Only emit optional fields when they carry a value.
        if self.abandoned_at:
            result["abandoned_at"] = self.abandoned_at.isoformat()
        if self.reasoning_tokens:
            result["reasoning_tokens"] = self.reasoning_tokens
        if self.cache_creation_tokens:
            result["cache_creation_tokens"] = self.cache_creation_tokens
        if self.cache_read_tokens:
            result["cache_read_tokens"] = self.cache_read_tokens
        return result
  357. # ===== 已弃用:Step 模型(保留用于向后兼容)=====
  358. # Step 类型
  359. StepType = Literal[
  360. "goal", "thought", "evaluation", "response",
  361. "action", "result", "memory_read", "memory_write",
  362. ]
  363. # Step 状态
  364. StepStatus = Literal[
  365. "planned", "in_progress", "awaiting_approval",
  366. "completed", "failed", "skipped",
  367. ]
  368. @dataclass
  369. class Step:
  370. """
  371. [已弃用] 执行步骤 - 使用 Message 模型替代
  372. 保留用于向后兼容
  373. """
  374. step_id: str
  375. trace_id: str
  376. step_type: StepType
  377. status: StepStatus
  378. sequence: int
  379. parent_id: Optional[str] = None
  380. description: str = ""
  381. data: Dict[str, Any] = field(default_factory=dict)
  382. summary: Optional[str] = None
  383. has_children: bool = False
  384. children_count: int = 0
  385. duration_ms: Optional[int] = None
  386. tokens: Optional[int] = None
  387. cost: Optional[float] = None
  388. created_at: datetime = field(default_factory=datetime.now)
  389. @classmethod
  390. def create(
  391. cls,
  392. trace_id: str,
  393. step_type: StepType,
  394. sequence: int,
  395. status: StepStatus = "completed",
  396. description: str = "",
  397. data: Dict[str, Any] = None,
  398. parent_id: Optional[str] = None,
  399. summary: Optional[str] = None,
  400. duration_ms: Optional[int] = None,
  401. tokens: Optional[int] = None,
  402. cost: Optional[float] = None,
  403. ) -> "Step":
  404. """创建新的 Step"""
  405. return cls(
  406. step_id=str(uuid.uuid4()),
  407. trace_id=trace_id,
  408. step_type=step_type,
  409. status=status,
  410. sequence=sequence,
  411. parent_id=parent_id,
  412. description=description,
  413. data=data or {},
  414. summary=summary,
  415. duration_ms=duration_ms,
  416. tokens=tokens,
  417. cost=cost,
  418. )
  419. def to_dict(self, view: str = "full") -> Dict[str, Any]:
  420. """
  421. 转换为字典
  422. Args:
  423. view: "compact" - 不返回大字段
  424. "full" - 返回完整数据
  425. """
  426. result = {
  427. "step_id": self.step_id,
  428. "trace_id": self.trace_id,
  429. "step_type": self.step_type,
  430. "status": self.status,
  431. "sequence": self.sequence,
  432. "parent_id": self.parent_id,
  433. "description": self.description,
  434. "summary": self.summary,
  435. "has_children": self.has_children,
  436. "children_count": self.children_count,
  437. "duration_ms": self.duration_ms,
  438. "tokens": self.tokens,
  439. "cost": self.cost,
  440. "created_at": self.created_at.isoformat() if self.created_at else None,
  441. }
  442. # 处理 data 字段
  443. if view == "compact":
  444. data_copy = self.data.copy()
  445. for key in ["output", "content", "full_output", "full_content"]:
  446. data_copy.pop(key, None)
  447. result["data"] = data_copy
  448. else:
  449. result["data"] = self.data
  450. return result