Просмотр исходного кода

Merge branch 'main' into dev_tao

guantao 6 дней назад
Родитель
Commit
e1e311ae89

+ 88 - 58
agent/core/runner.py

@@ -1101,18 +1101,15 @@ class AgentRunner:
             # 构建 LLM messages(注入上下文)
             # 构建 LLM messages(注入上下文)
             llm_messages = list(history)
             llm_messages = list(history)
 
 
-            # 收集需要持久化的 system 消息
-            system_messages_to_persist = []
-
-            # 研究流程引导(仅在启用且处于研究阶段时)
-            research_state = self._get_research_state(trace_id)
-            if research_state and research_state["stage"] != "execution":
-                research_guide = self._build_research_guide(research_state)
-                if research_guide:
-                    system_msg = {"role": "system", "content": research_guide}
-                    llm_messages.append(system_msg)
-                    system_messages_to_persist.append(("研究流程引导", system_msg))
+            # 先对历史消息应用 Prompt Caching(在注入动态内容之前)
+            # 这样可以确保历史消息的缓存点固定,不受动态注入影响
+            llm_messages = self._add_cache_control(
+                llm_messages,
+                config.model,
+                config.enable_prompt_caching
+            )
 
 
+            # 然后追加动态注入的内容(不影响已缓存的历史消息)
             # 周期性注入 GoalTree + Collaborators
             # 周期性注入 GoalTree + Collaborators
             if iteration % CONTEXT_INJECTION_INTERVAL == 0:
             if iteration % CONTEXT_INJECTION_INTERVAL == 0:
                 context_injection = self._build_context_injection(trace, goal_tree)
                 context_injection = self._build_context_injection(trace, goal_tree)
@@ -1149,6 +1146,7 @@ class AgentRunner:
                         logger.warning("经验检索失败: %s", e)
                         logger.warning("经验检索失败: %s", e)
                         _cached_exp_text = ""
                         _cached_exp_text = ""
 
 
+            # 经验注入:goal切换时注入相关历史经验
             if _cached_exp_text:
             if _cached_exp_text:
                 system_msg = {"role": "system", "content": _cached_exp_text}
                 system_msg = {"role": "system", "content": _cached_exp_text}
                 llm_messages.append(system_msg)
                 llm_messages.append(system_msg)
@@ -1175,12 +1173,6 @@ class AgentRunner:
                     head_seq = sequence
                     head_seq = sequence
                     sequence += 1
                     sequence += 1
 
 
-            # 应用 Prompt Caching(不修改原始 history,只在发送给 LLM 时添加缓存标记)
-            llm_messages = self._add_cache_control(
-                llm_messages,
-                config.model,
-                config.enable_prompt_caching
-            )
 
 
             # 调用 LLM
             # 调用 LLM
             result = await self.llm_call(
             result = await self.llm_call(
@@ -1931,9 +1923,10 @@ created_at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
         """
         """
         为支持的模型添加 Prompt Caching 标记
         为支持的模型添加 Prompt Caching 标记
 
 
-        策略:
-        1. system message 添加缓存(如果存在且足够长)
-        2. 倒数第 3-5 条 user/assistant 消息添加缓存点
+        策略:固定位置 + 延迟查找
+        1. system message 添加缓存(如果足够长)
+        2. 固定位置缓存点(20, 40, 60, 80),确保每个缓存点间隔 >= 1024 tokens
+        3. 最多使用 4 个缓存点(含 system)
 
 
         Args:
         Args:
             messages: 原始消息列表
             messages: 原始消息列表
@@ -1955,62 +1948,99 @@ created_at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
         messages = copy.deepcopy(messages)
         messages = copy.deepcopy(messages)
 
 
         # 策略 1: 为 system message 添加缓存
         # 策略 1: 为 system message 添加缓存
+        system_cached = False
         for msg in messages:
         for msg in messages:
             if msg.get("role") == "system":
             if msg.get("role") == "system":
                 content = msg.get("content", "")
                 content = msg.get("content", "")
-                # 只有足够长的 system prompt 才值得缓存(>1024 tokens 约 4000 字符)
                 if isinstance(content, str) and len(content) > 1000:
                 if isinstance(content, str) and len(content) > 1000:
-                    # Anthropic API 格式:在 content 的最后一个 block 添加 cache_control
-                    # 如果 content 是 string,需要转换为 list 格式
-                    msg["content"] = [
-                        {
-                            "type": "text",
-                            "text": content,
-                            "cache_control": {"type": "ephemeral"}
-                        }
-                    ]
+                    msg["content"] = [{
+                        "type": "text",
+                        "text": content,
+                        "cache_control": {"type": "ephemeral"}
+                    }]
+                    system_cached = True
                     logger.debug(f"[Cache] 为 system message 添加缓存标记 (len={len(content)})")
                     logger.debug(f"[Cache] 为 system message 添加缓存标记 (len={len(content)})")
                 break
                 break
 
 
-        # 策略 2: 为倒数第 3-5 条消息添加缓存点
-        # 这样可以缓存大部分历史对话,只有最新的几条消息是新的
+        # 策略 2: 固定位置缓存点
+        CACHE_INTERVAL = 20
+        MAX_POINTS = 3 if system_cached else 4
+        MIN_TOKENS = 1024
+        AVG_TOKENS_PER_MSG = 70
+
+        total_msgs = len(messages)
+        if total_msgs == 0:
+            return messages
+
         cache_positions = []
         cache_positions = []
-        user_assistant_msgs = [
-            (i, msg) for i, msg in enumerate(messages)
-            if msg.get("role") in ("user", "assistant")
-        ]
-
-        if len(user_assistant_msgs) >= 5:
-            # 在倒数第 5 条添加缓存点
-            cache_positions.append(user_assistant_msgs[-5][0])
-        elif len(user_assistant_msgs) >= 3:
-            # 在倒数第 3 条添加缓存点
-            cache_positions.append(user_assistant_msgs[-3][0])
+        last_cache_pos = 0
+
+        for i in range(1, MAX_POINTS + 1):
+            target_pos = i * CACHE_INTERVAL - 1  # 19, 39, 59, 79
 
 
+            if target_pos >= total_msgs:
+                break
+
+            # 从目标位置开始查找合适的 user/assistant 消息
+            for j in range(target_pos, total_msgs):
+                msg = messages[j]
+
+                if msg.get("role") not in ("user", "assistant"):
+                    continue
+
+                content = msg.get("content", "")
+                if not content:
+                    continue
+
+                # 检查 content 是否非空
+                is_valid = False
+                if isinstance(content, str):
+                    is_valid = len(content) > 0
+                elif isinstance(content, list):
+                    is_valid = any(
+                        isinstance(block, dict) and
+                        block.get("type") == "text" and
+                        len(block.get("text", "")) > 0
+                        for block in content
+                    )
+
+                if not is_valid:
+                    continue
+
+                # 检查 token 距离
+                msg_count = j - last_cache_pos
+                estimated_tokens = msg_count * AVG_TOKENS_PER_MSG
+
+                if estimated_tokens >= MIN_TOKENS:
+                    cache_positions.append(j)
+                    last_cache_pos = j
+                    logger.debug(f"[Cache] 在位置 {j} 添加缓存点 (估算 {estimated_tokens} tokens)")
+                    break
+
+        # 应用缓存标记
         for idx in cache_positions:
         for idx in cache_positions:
             msg = messages[idx]
             msg = messages[idx]
             content = msg.get("content", "")
             content = msg.get("content", "")
 
 
-            # 处理 string content
             if isinstance(content, str):
             if isinstance(content, str):
-                msg["content"] = [
-                    {
-                        "type": "text",
-                        "text": content,
-                        "cache_control": {"type": "ephemeral"}
-                    }
-                ]
+                msg["content"] = [{
+                    "type": "text",
+                    "text": content,
+                    "cache_control": {"type": "ephemeral"}
+                }]
                 logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
                 logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
-
-            # 处理 list content(多模态消息)
-            elif isinstance(content, list) and len(content) > 0:
+            elif isinstance(content, list):
                 # 在最后一个 text block 添加 cache_control
                 # 在最后一个 text block 添加 cache_control
-                for i in range(len(content) - 1, -1, -1):
-                    if isinstance(content[i], dict) and content[i].get("type") == "text":
-                        content[i]["cache_control"] = {"type": "ephemeral"}
-                        logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 的 content[{i}] 添加缓存标记")
+                for block in reversed(content):
+                    if isinstance(block, dict) and block.get("type") == "text":
+                        block["cache_control"] = {"type": "ephemeral"}
+                        logger.debug(f"[Cache] 为 message[{idx}] ({msg.get('role')}) 添加缓存标记")
                         break
                         break
 
 
+        logger.debug(
+            f"[Cache] 总消息: {total_msgs}, "
+            f"缓存点: {len(cache_positions)} at {cache_positions}"
+        )
         return messages
         return messages
 
 
     def _get_tool_schemas(self, tools: Optional[List[str]]) -> List[Dict]:
     def _get_tool_schemas(self, tools: Optional[List[str]]) -> List[Dict]:

+ 16 - 0
agent/llm/openrouter.py

@@ -511,6 +511,22 @@ async def _openrouter_anthropic_call(
     if "temperature" in kwargs:
     if "temperature" in kwargs:
         payload["temperature"] = kwargs["temperature"]
         payload["temperature"] = kwargs["temperature"]
 
 
+    # Debug: 检查 cache_control 是否存在
+    cache_control_count = 0
+    if isinstance(system_prompt, list):
+        for block in system_prompt:
+            if isinstance(block, dict) and "cache_control" in block:
+                cache_control_count += 1
+    for msg in anthropic_messages:
+        content = msg.get("content", "")
+        if isinstance(content, list):
+            for block in content:
+                if isinstance(block, dict) and "cache_control" in block:
+                    cache_control_count += 1
+    if cache_control_count > 0:
+        print(f"[OpenRouter/Anthropic] 发现 {cache_control_count} 个 cache_control 标记")
+        logger.info(f"[OpenRouter/Anthropic] 发现 {cache_control_count} 个 cache_control 标记")
+
     headers = {
     headers = {
         "Authorization": f"Bearer {api_key}",
         "Authorization": f"Bearer {api_key}",
         "anthropic-version": "2023-06-01",
         "anthropic-version": "2023-06-01",

+ 3 - 3
examples/how/tool/nanobanana.py

@@ -36,9 +36,9 @@ DEFAULT_IMAGE_PROMPT = (
 
 
 DEFAULT_IMAGE_MODEL_CANDIDATES = [
 DEFAULT_IMAGE_MODEL_CANDIDATES = [
     "google/gemini-2.5-flash-image",
     "google/gemini-2.5-flash-image",
-    "google/gemini-3-pro-image-preview",
-    "black-forest-labs/flux.2-flex",
-    "black-forest-labs/flux.2-pro",
+    # "google/gemini-3-pro-image-preview",
+    # "black-forest-labs/flux.2-flex",
+    # "black-forest-labs/flux.2-pro",
 ]
 ]
 
 
 
 

Разница между файлами не показана из-за своего большого размера
+ 719 - 43
frontend/react-template/package-lock.json


+ 6 - 1
frontend/react-template/package.json

@@ -16,11 +16,14 @@
     "d3": "^7.8.5",
     "d3": "^7.8.5",
     "react": "^18.2.0",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
     "react-dom": "^18.2.0",
+    "react-error-boundary": "^6.1.1",
     "react-markdown": "^10.1.0"
     "react-markdown": "^10.1.0"
   },
   },
   "devDependencies": {
   "devDependencies": {
     "@tailwindcss/postcss": "^4.0.0",
     "@tailwindcss/postcss": "^4.0.0",
     "@tailwindcss/vite": "^4.0.0",
     "@tailwindcss/vite": "^4.0.0",
+    "@testing-library/dom": "^10.4.1",
+    "@testing-library/react": "^16.3.2",
     "@types/d3": "^7.4.3",
     "@types/d3": "^7.4.3",
     "@types/node": "^20.11.5",
     "@types/node": "^20.11.5",
     "@types/react": "^18.2.43",
     "@types/react": "^18.2.43",
@@ -32,9 +35,11 @@
     "eslint": "^8.55.0",
     "eslint": "^8.55.0",
     "eslint-plugin-react-hooks": "^4.6.0",
     "eslint-plugin-react-hooks": "^4.6.0",
     "eslint-plugin-react-refresh": "^0.4.5",
     "eslint-plugin-react-refresh": "^0.4.5",
+    "jsdom": "^28.1.0",
     "postcss": "^8.4.38",
     "postcss": "^8.4.38",
     "tailwindcss": "^4.0.0",
     "tailwindcss": "^4.0.0",
     "typescript": "^5.2.2",
     "typescript": "^5.2.2",
-    "vite": "^5.0.8"
+    "vite": "^5.0.8",
+    "vitest": "^4.0.18"
   }
   }
 }
 }

+ 42 - 12
frontend/react-template/src/api/client.ts

@@ -1,23 +1,53 @@
 import axios from "axios";
 import axios from "axios";
+import { Toast } from "@douyinfe/semi-ui";
 
 
+// Determine base URL from environment variables, or fallback to default
 const DEFAULT_BASE_URL = "http://localhost:8000";
 const DEFAULT_BASE_URL = "http://localhost:8000";
 
 
-export const baseURL =
-  (typeof import.meta !== "undefined" &&
-    typeof import.meta.env !== "undefined" &&
-    (import.meta.env as { VITE_API_BASE_URL?: string }).VITE_API_BASE_URL) ||
-  DEFAULT_BASE_URL;
+// Handle various environment variable formats (Vite uses import.meta.env.VITE_*)
+const getBaseUrl = () => {
+  if (typeof import.meta !== "undefined" && import.meta.env && import.meta.env.VITE_API_BASE_URL) {
+    return import.meta.env.VITE_API_BASE_URL;
+  }
+  return DEFAULT_BASE_URL;
+};
 
 
-export const http = axios.create({
-  baseURL,
-  headers: {
-    "Content-Type": "application/json",
-  },
+export const client = axios.create({
+  baseURL: getBaseUrl(),
 });
 });
 
 
-export async function request<T>(path: string, init?: { method?: string; data?: unknown; params?: Record<string, unknown> }): Promise<T> {
+client.interceptors.response.use(
+  (response) => response,
+  (error) => {
+    // Check if error has a response (server responded with status code outside 2xx)
+    if (error.response) {
+      const { status, data } = error.response;
+      const message = data?.detail || data?.message || "请求失败";
+
+      // Handle specific status codes
+      if (status >= 500) {
+        Toast.error(`服务器错误 (${status}): ${message}`);
+      } else if (status >= 400) {
+        Toast.error(`请求错误 (${status}): ${message}`);
+      }
+    } else if (error.request) {
+      // The request was made but no response was received
+      Toast.error("网络错误: 无法连接到服务器");
+    } else {
+      // Something happened in setting up the request that triggered an Error
+      Toast.error(`请求配置错误: ${error.message}`);
+    }
+
+    return Promise.reject(error);
+  },
+);
+
+export async function request<T>(
+  path: string,
+  init?: { method?: string; data?: unknown; params?: Record<string, unknown> },
+): Promise<T> {
   const method = init?.method || "GET";
   const method = init?.method || "GET";
-  const response = await http.request<T>({
+  const response = await client.request<T>({
     url: path,
     url: path,
     method,
     method,
     params: init?.params,
     params: init?.params,

+ 4 - 3
frontend/react-template/src/components/DetailPanel/DetailPanel.tsx

@@ -1,3 +1,4 @@
+import ReactMarkdown from "react-markdown";
 import type { Goal } from "../../types/goal";
 import type { Goal } from "../../types/goal";
 import type { Edge, Message } from "../../types/message";
 import type { Edge, Message } from "../../types/message";
 import styles from "./DetailPanel.module.css";
 import styles from "./DetailPanel.module.css";
@@ -14,10 +15,10 @@ export const DetailPanel = ({ node, edge, messages = [], onClose }: DetailPanelP
 
 
   const renderMessageContent = (content: Message["content"]) => {
   const renderMessageContent = (content: Message["content"]) => {
     if (!content) return "";
     if (!content) return "";
-    if (typeof content === "string") return content;
+    if (typeof content === "string") return <ReactMarkdown>{content}</ReactMarkdown>;
 
 
     // 如果有 text,优先显示 text
     // 如果有 text,优先显示 text
-    if (content.text) return content.text;
+    if (content.text) return <ReactMarkdown>{content.text}</ReactMarkdown>;
 
 
     // 如果有 tool_calls,展示 tool_calls 信息
     // 如果有 tool_calls,展示 tool_calls 信息
     if (content.tool_calls && content.tool_calls.length > 0) {
     if (content.tool_calls && content.tool_calls.length > 0) {
@@ -36,7 +37,7 @@ export const DetailPanel = ({ node, edge, messages = [], onClose }: DetailPanelP
       );
       );
     }
     }
 
 
-    return JSON.stringify(content);
+    return <ReactMarkdown>{JSON.stringify(content)}</ReactMarkdown>;
   };
   };
 
 
   const isGoal = (node: Goal | Message): node is Goal => {
   const isGoal = (node: Goal | Message): node is Goal => {

+ 63 - 25
frontend/react-template/src/components/FlowChart/hooks/useFlowChartData.ts

@@ -50,9 +50,11 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
   const [sinceEventId, setSinceEventId] = useState(0);
   const [sinceEventId, setSinceEventId] = useState(0);
   const [readyToConnect, setReadyToConnect] = useState(false);
   const [readyToConnect, setReadyToConnect] = useState(false);
   const currentEventIdRef = useRef(0);
   const currentEventIdRef = useRef(0);
+  const maxSequenceRef = useRef(0);
   const restReloadingRef = useRef(false);
   const restReloadingRef = useRef(false);
   const [reloading, setReloading] = useState(false);
   const [reloading, setReloading] = useState(false);
   const [invalidBranches, setInvalidBranches] = useState<Message[][]>([]);
   const [invalidBranches, setInvalidBranches] = useState<Message[][]>([]);
+  const [traceCompleted, setTraceCompleted] = useState(false);
 
 
   const messageComparator = useCallback((a: Message, b: Message): number => {
   const messageComparator = useCallback((a: Message, b: Message): number => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -94,7 +96,9 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
     setSinceEventId(0);
     setSinceEventId(0);
     setReadyToConnect(false);
     setReadyToConnect(false);
     currentEventIdRef.current = 0;
     currentEventIdRef.current = 0;
+    maxSequenceRef.current = 0;
     restReloadingRef.current = false;
     restReloadingRef.current = false;
+    setTraceCompleted(false);
   }, [traceId]);
   }, [traceId]);
 
 
   const reloadViaRest = useCallback(async () => {
   const reloadViaRest = useCallback(async () => {
@@ -153,6 +157,13 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
 
 
         const { availableData: finalMessages, invalidBranches: invalidBranchesTemp } = processRetryLogic(nextMessages);
         const { availableData: finalMessages, invalidBranches: invalidBranchesTemp } = processRetryLogic(nextMessages);
 
 
+        // Update max sequence
+        const maxSeq = finalMessages.reduce((max, msg) => {
+          const seq = typeof msg.sequence === "number" ? msg.sequence : -1;
+          return Math.max(max, seq);
+        }, 0);
+        maxSequenceRef.current = maxSeq;
+
         setMessages(finalMessages);
         setMessages(finalMessages);
         setInvalidBranches(invalidBranchesTemp);
         setInvalidBranches(invalidBranchesTemp);
         const grouped: Record<string, Message[]> = {};
         const grouped: Record<string, Message[]> = {};
@@ -250,32 +261,41 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
         if (typeof currentEventId === "number") {
         if (typeof currentEventId === "number") {
           currentEventIdRef.current = Math.max(currentEventIdRef.current, currentEventId);
           currentEventIdRef.current = Math.max(currentEventIdRef.current, currentEventId);
         }
         }
+        return;
+      }
 
 
-        const trace = isRecord(data.trace) ? data.trace : undefined;
-        const rawTrace = isRecord(raw.trace) ? raw.trace : undefined;
-        const goalTree =
-          (isRecord(data.goal_tree) ? data.goal_tree : undefined) ||
-          (trace && isRecord(trace.goal_tree) ? trace.goal_tree : undefined) ||
-          (isRecord(raw.goal_tree) ? raw.goal_tree : undefined) ||
-          (rawTrace && isRecord(rawTrace.goal_tree) ? rawTrace.goal_tree : undefined) ||
-          {};
-        const goalList = isRecord(goalTree) ? goalTree.goals : undefined;
-        const nextGoals = Array.isArray(goalList) ? (goalList as Goal[]) : [];
-        setGoals((prev) => {
-          const mergedFlat = nextGoals.map((ng) => {
-            const existing = prev.find((p) => p.id === ng.id);
-            if (!existing) return ng;
-            const merged: Goal = { ...existing, ...ng };
-            if (existing.sub_trace_ids && !merged.sub_trace_ids) {
-              merged.sub_trace_ids = existing.sub_trace_ids;
-            }
-            if (existing.agent_call_mode && !merged.agent_call_mode) {
-              merged.agent_call_mode = existing.agent_call_mode;
-            }
-            return merged;
+      if (event === "rewind") {
+        console.log("Processing rewind event:", data);
+        const afterSequence =
+          (typeof data.after_sequence === "number" ? data.after_sequence : undefined) ||
+          (typeof raw.after_sequence === "number" ? raw.after_sequence : undefined);
+
+        if (typeof afterSequence === "number") {
+          maxSequenceRef.current = afterSequence;
+          setMessages((prev) =>
+            prev.filter((msg) => (typeof msg.sequence === "number" ? msg.sequence : -1) <= afterSequence),
+          );
+
+          setMsgGroups((prev) => {
+            const next: Record<string, Message[]> = {};
+            Object.entries(prev).forEach(([k, v]) => {
+              const filtered = v.filter(
+                (msg) => (typeof msg.sequence === "number" ? msg.sequence : -1) <= afterSequence,
+              );
+              if (filtered.length > 0) next[k] = filtered;
+            });
+            return next;
           });
           });
-          return buildSubGoals(mergedFlat);
-        });
+
+          // 如果有 goal_tree_snapshot,直接更新 Goals
+          const snapshot = isRecord(data.goal_tree_snapshot) ? data.goal_tree_snapshot : undefined;
+          if (snapshot && Array.isArray(snapshot.goals)) {
+            setGoals(buildSubGoals(snapshot.goals as Goal[]));
+          } else {
+            // 否则触发重载
+            void reloadViaRest();
+          }
+        }
         return;
         return;
       }
       }
 
 
@@ -333,9 +353,27 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
         return;
         return;
       }
       }
 
 
+      if (event === "trace_completed") {
+        setTraceCompleted(true);
+        return;
+      }
+
       if (event === "message_added") {
       if (event === "message_added") {
         const message = isMessage(data.message) ? data.message : isMessage(raw.message) ? raw.message : null;
         const message = isMessage(data.message) ? data.message : isMessage(raw.message) ? raw.message : null;
         if (message) {
         if (message) {
+          // Check sequence continuity
+          const seq = typeof message.sequence === "number" ? message.sequence : -1;
+          if (seq > 0) {
+            if (maxSequenceRef.current > 0 && seq > maxSequenceRef.current + 1) {
+              console.warn(
+                `Message sequence gap detected: current max ${maxSequenceRef.current}, received ${seq}. Triggering reload.`,
+              );
+              void reloadViaRest();
+              return;
+            }
+            maxSequenceRef.current = Math.max(maxSequenceRef.current, seq);
+          }
+
           setMessages((prev) => {
           setMessages((prev) => {
             const next = [...prev, message];
             const next = [...prev, message];
             next.sort(messageComparator);
             next.sort(messageComparator);
@@ -356,5 +394,5 @@ export const useFlowChartData = (traceId: string | null, refreshTrigger?: number
   // 只有当 traceId 存在且 REST 加载完成 (readyToConnect) 后才连接 WebSocket
   // 只有当 traceId 存在且 REST 加载完成 (readyToConnect) 后才连接 WebSocket
   const { connected } = useWebSocket(readyToConnect ? traceId : null, wsOptions);
   const { connected } = useWebSocket(readyToConnect ? traceId : null, wsOptions);
 
 
-  return { goals, messages, msgGroups, connected, reloading, invalidBranches };
+  return { goals, messages, msgGroups, connected, reloading, invalidBranches, traceCompleted };
 };
 };

+ 25 - 0
frontend/react-template/src/components/MainContent/MainContent.module.css

@@ -58,6 +58,31 @@
   background-color: var(--border-medium);
   background-color: var(--border-medium);
 }
 }
 
 
+.completedBadge {
+  display: flex;
+  align-items: center;
+  gap: var(--space-xs);
+  padding: 4px 12px;
+  background-color: #ecfdf5; /* Emerald 50 */
+  border: 1px solid var(--color-success);
+  border-radius: var(--radius-full);
+  color: var(--color-success);
+  font-size: 13px;
+  font-weight: 500;
+  animation: fadeIn 300ms ease-out;
+}
+
+@keyframes fadeIn {
+  from {
+    opacity: 0;
+    transform: translateY(-4px);
+  }
+  to {
+    opacity: 1;
+    transform: translateY(0);
+  }
+}
+
 .headerRight {
 .headerRight {
   display: flex;
   display: flex;
   align-items: center;
   align-items: center;

+ 13 - 2
frontend/react-template/src/components/MainContent/MainContent.tsx

@@ -47,7 +47,10 @@ export const MainContent: FC<MainContentProps> = ({
   const [cachedGoals, setCachedGoals] = useState<Goal[]>([]);
   const [cachedGoals, setCachedGoals] = useState<Goal[]>([]);
   const [cachedMsgGroups, setCachedMsgGroups] = useState<Record<string, Message[]>>({});
   const [cachedMsgGroups, setCachedMsgGroups] = useState<Record<string, Message[]>>({});
   const [cachedInvalidBranches, setCachedInvalidBranches] = useState<Message[][]>([]);
   const [cachedInvalidBranches, setCachedInvalidBranches] = useState<Message[][]>([]);
-  const { goals, connected, msgGroups, reloading, invalidBranches } = useFlowChartData(traceId, messageRefreshTrigger);
+  const { goals, connected, msgGroups, reloading, invalidBranches, traceCompleted } = useFlowChartData(
+    traceId,
+    messageRefreshTrigger,
+  );
   console.log("%c [ msgGroups ]-34", "font-size:13px; background:pink; color:#bf2c9f;", msgGroups);
   console.log("%c [ msgGroups ]-34", "font-size:13px; background:pink; color:#bf2c9f;", msgGroups);
   const displayGoals = goals.length > 0 ? goals : cachedGoals;
   const displayGoals = goals.length > 0 ? goals : cachedGoals;
   const displayMsgGroups = Object.keys(msgGroups).length > 0 ? msgGroups : cachedMsgGroups;
   const displayMsgGroups = Object.keys(msgGroups).length > 0 ? msgGroups : cachedMsgGroups;
@@ -111,7 +114,15 @@ export const MainContent: FC<MainContentProps> = ({
   return (
   return (
     <div className={styles.main}>
     <div className={styles.main}>
       <div className={styles.header}>
       <div className={styles.header}>
-        <ConnectionStatus isConnected={connected} />
+        <div style={{ display: "flex", alignItems: "center", gap: 16 }}>
+          <ConnectionStatus isConnected={connected} />
+          {traceCompleted && (
+            <div className={styles.completedBadge}>
+              <span style={{ fontSize: 16 }}>✓</span>
+              <span>执行完成</span>
+            </div>
+          )}
+        </div>
         <div className={styles.headerRight}>
         <div className={styles.headerRight}>
           <Select
           <Select
             value={traceId}
             value={traceId}

+ 32 - 1
frontend/react-template/src/main.tsx

@@ -1,10 +1,41 @@
 import { createRoot } from "react-dom/client";
 import { createRoot } from "react-dom/client";
+import { ErrorBoundary } from "react-error-boundary";
+import type { FallbackProps } from "react-error-boundary";
 import App from "./App";
 import App from "./App";
 import "./styles/global.css";
 import "./styles/global.css";
 import "./styles/variables.css";
 import "./styles/variables.css";
 
 
 const container = document.getElementById("root");
 const container = document.getElementById("root");
 
 
+const ErrorFallback = ({ error, resetErrorBoundary }: FallbackProps) => {
+  return (
+    <div style={{ padding: "20px", textAlign: "center", marginTop: "50px" }}>
+      <h2>Something went wrong:</h2>
+      <pre style={{ color: "red", backgroundColor: "#fce4e4", padding: "10px", borderRadius: "4px" }}>
+        {error instanceof Error ? error.message : String(error)}
+      </pre>
+      <button
+        onClick={resetErrorBoundary}
+        style={{
+          marginTop: "10px",
+          padding: "8px 16px",
+          backgroundColor: "#3b82f6",
+          color: "white",
+          border: "none",
+          borderRadius: "4px",
+          cursor: "pointer",
+        }}
+      >
+        Try again
+      </button>
+    </div>
+  );
+};
+
 if (container) {
 if (container) {
-  createRoot(container).render(<App />);
+  createRoot(container).render(
+    <ErrorBoundary FallbackComponent={ErrorFallback}>
+      <App />
+    </ErrorBoundary>,
+  );
 }
 }

+ 9 - 0
frontend/react-template/src/types/goal.ts

@@ -1,3 +1,10 @@
+export interface GoalStats {
+  message_count: number;
+  total_tokens: number;
+  total_cost: number;
+  preview?: string;
+}
+
 export interface Goal {
 export interface Goal {
   id: string;
   id: string;
   description: string;
   description: string;
@@ -11,6 +18,8 @@ export interface Goal {
   agent_call_mode?: string;
   agent_call_mode?: string;
   sub_trace_ids?: Array<string | { trace_id: string; mission?: string }>;
   sub_trace_ids?: Array<string | { trace_id: string; mission?: string }>;
   sub_goals?: Array<Goal>;
   sub_goals?: Array<Goal>;
+  self_stats?: GoalStats;
+  cumulative_stats?: GoalStats;
 }
 }
 
 
 export interface BranchContext {
 export interface BranchContext {

+ 4 - 1
frontend/react-template/src/types/trace.ts

@@ -11,6 +11,8 @@ export interface TraceListItem {
   current_goal_id: string;
   current_goal_id: string;
   created_at: string;
   created_at: string;
   parent_trace_id: string | null;
   parent_trace_id: string | null;
+  agent_type?: string;
+  parent_goal_id?: string;
 }
 }
 
 
 export interface TraceListResponse {
 export interface TraceListResponse {
@@ -33,5 +35,6 @@ export interface TraceDetailResponse {
     current_id: string | null;
     current_id: string | null;
     goals: Goal[];
     goals: Goal[];
   };
   };
-  branches: Record<string, BranchContext>;
+  sub_traces?: Record<string, TraceListItem>;
+  branches?: Record<string, BranchContext>; // Deprecated but kept for compatibility
 }
 }

+ 1 - 0
node_modules/.vite/vitest/da39a3ee5e6b4b0d3255bfef95601890afd80709/results.json

@@ -0,0 +1 @@
+{"version":"4.0.18","results":[[":frontend/react-template/src/components/FlowChart/hooks/useFlowChartData.spec.ts",{"duration":0,"failed":true}]]}

Некоторые файлы не были показаны из-за большого количества измененных файлов