Parcourir la source

Merge branch 'main' of https://git.yishihui.com/howard/Agent

guantao il y a 6 heures
Parent
commit
e1749c67d9

+ 1 - 0
README.md

@@ -243,6 +243,7 @@ RunConfig(
     agent_type="default",     # 预设类型:default / explore / analyst
     agent_type="default",     # 预设类型:default / explore / analyst
     trace_id=None,            # 续跑/回溯时传入已有 trace ID
     trace_id=None,            # 续跑/回溯时传入已有 trace ID
     after_sequence=None,      # 从哪条消息后续跑(message sequence)
     after_sequence=None,      # 从哪条消息后续跑(message sequence)
+    goal_compression="on_overflow",  # Goal 压缩模式:none / on_complete / on_overflow
     knowledge=KnowledgeConfig(),  # 知识管理配置
     knowledge=KnowledgeConfig(),  # 知识管理配置
 )
 )
 ```
 ```

+ 1 - 0
agent/core/prompts/__init__.py

@@ -31,6 +31,7 @@ from agent.core.prompts.compression import (
     COMPRESSION_EVAL_PROMPT_TEMPLATE,
     COMPRESSION_EVAL_PROMPT_TEMPLATE,
     SUMMARY_HEADER_TEMPLATE,
     SUMMARY_HEADER_TEMPLATE,
     build_compression_eval_prompt,
     build_compression_eval_prompt,
+    build_single_turn_prompt,
     build_summary_header,
     build_summary_header,
 )
 )
 
 

+ 23 - 0
agent/core/prompts/compression.py

@@ -21,11 +21,30 @@ COMPRESSION_PROMPT_TEMPLATE = """请对以上对话历史进行压缩总结。
 格式要求:
 格式要求:
 [[SUMMARY]]
 [[SUMMARY]]
 (此处填写结构化的摘要内容)
 (此处填写结构化的摘要内容)
+
+**生成摘要后立即停止,不要继续执行原有任务。**
 """
 """
 
 
 # 保留旧名以兼容 compaction.py 的调用
 # 保留旧名以兼容 compaction.py 的调用
 COMPRESSION_EVAL_PROMPT_TEMPLATE = COMPRESSION_PROMPT_TEMPLATE
 COMPRESSION_EVAL_PROMPT_TEMPLATE = COMPRESSION_PROMPT_TEMPLATE
 
 
+SINGLE_TURN_PROMPT = """请对以上对话历史进行压缩总结。
+
+### 摘要要求
+1. 保留关键决策、结论和产出(如创建的文件、修改的代码、得出的分析结论)
+2. 保留重要的上下文(如用户的要求、约束条件、之前的讨论结果)
+3. 省略中间探索过程、重复的工具调用细节
+4. 使用结构化格式(标题 + 要点 + 相关资源引用,若有)
+5. 控制在 2000 字以内
+
+当前 GoalTree 状态:
+{goal_tree_prompt}
+
+格式要求:
+[[SUMMARY]]
+(此处填写结构化的摘要内容)
+"""
+
 SUMMARY_HEADER_TEMPLATE = """## 对话历史摘要(自动压缩)
 SUMMARY_HEADER_TEMPLATE = """## 对话历史摘要(自动压缩)
 
 
 {summary_text}
 {summary_text}
@@ -46,5 +65,9 @@ def build_compression_eval_prompt(
     )
     )
 
 
 
 
+def build_single_turn_prompt(goal_tree_prompt: str) -> str:
+    return SINGLE_TURN_PROMPT.format(goal_tree_prompt=goal_tree_prompt)
+
+
 def build_summary_header(summary_text: str) -> str:
 def build_summary_header(summary_text: str) -> str:
     return SUMMARY_HEADER_TEMPLATE.format(summary_text=summary_text)
     return SUMMARY_HEADER_TEMPLATE.format(summary_text=summary_text)

+ 2 - 0
agent/core/prompts/knowledge.py

@@ -50,6 +50,7 @@ REFLECT_PROMPT = """请回顾以上执行过程,将值得沉淀的经验直接
 - 只保存最有价值的经验,宁少勿滥;一次就成功或比较简单的经验就不要记录了,记录反复尝试或被用户指导后才成功的经验、或者是调研之后的收获。
 - 只保存最有价值的经验,宁少勿滥;一次就成功或比较简单的经验就不要记录了,记录反复尝试或被用户指导后才成功的经验、或者是调研之后的收获。
 - 不需要输出任何文字,直接调用工具即可
 - 不需要输出任何文字,直接调用工具即可
 - 如果没有值得保存的经验,不调用任何工具
 - 如果没有值得保存的经验,不调用任何工具
+- **完成经验保存后立即停止,不要继续执行原有任务**
 """
 """
 
 
 
 
@@ -91,6 +92,7 @@ COMPLETION_REFLECT_PROMPT = """请对整个任务进行复盘,将值得沉淀
 - 只保存最有价值的经验,宁少勿滥;一次就成功或比较简单的经验就不要记录了,记录反复尝试或被用户指导后才成功的经验、或者是调研之后的收获。
 - 只保存最有价值的经验,宁少勿滥;一次就成功或比较简单的经验就不要记录了,记录反复尝试或被用户指导后才成功的经验、或者是调研之后的收获。
 - 不需要输出任何文字,直接调用工具即可
 - 不需要输出任何文字,直接调用工具即可
 - 如果没有值得保存的经验,不调用任何工具
 - 如果没有值得保存的经验,不调用任何工具
+- **完成经验保存后立即停止,不要继续执行原有任务**
 """
 """
 
 
 
 

+ 147 - 255
agent/core/runner.py

@@ -28,7 +28,7 @@ from agent.trace.protocols import TraceStore
 from agent.trace.goal_models import GoalTree
 from agent.trace.goal_models import GoalTree
 from agent.trace.compaction import (
 from agent.trace.compaction import (
     CompressionConfig,
     CompressionConfig,
-    filter_by_goal_status,
+    compress_completed_goals,
     estimate_tokens,
     estimate_tokens,
     needs_level2_compression,
     needs_level2_compression,
     build_compression_prompt,
     build_compression_prompt,
@@ -105,6 +105,7 @@ class RunConfig:
     max_iterations: int = 200
     max_iterations: int = 200
     tools: Optional[List[str]] = None          # None = 全部已注册工具
     tools: Optional[List[str]] = None          # None = 全部已注册工具
     side_branch_max_turns: int = 5             # 侧分支最大轮次(压缩/反思)
     side_branch_max_turns: int = 5             # 侧分支最大轮次(压缩/反思)
+    goal_compression: Literal["none", "on_complete", "on_overflow"] = "on_overflow"  # Goal 压缩模式
 
 
     # --- 强制侧分支(用于 API 手动触发或自动压缩流程)---
     # --- 强制侧分支(用于 API 手动触发或自动压缩流程)---
     # 使用列表作为侧分支队列,每次完成一个侧分支后 pop(0) 取下一个
     # 使用列表作为侧分支队列,每次完成一个侧分支后 pop(0) 取下一个
@@ -787,19 +788,20 @@ class AgentRunner:
             config.force_side_branch = ["reflection", "compression"]
             config.force_side_branch = ["reflection", "compression"]
             return history, head_seq, sequence, True
             return history, head_seq, sequence, True
 
 
-        # Level 1 压缩:GoalTree 过滤
-        if self.trace_store and goal_tree:
+        # 以下为未启用反思、需要压缩的情况,直接进行level 1压缩,并检查是否需要进行level 2压缩(进入侧分支)
+        # Level 1 压缩:Goal 完成压缩
+        if config.goal_compression != "none" and self.trace_store and goal_tree:
             if head_seq > 0:
             if head_seq > 0:
                 main_path_msgs = await self.trace_store.get_main_path_messages(
                 main_path_msgs = await self.trace_store.get_main_path_messages(
                     trace_id, head_seq
                     trace_id, head_seq
                 )
                 )
-                filtered_msgs = filter_by_goal_status(main_path_msgs, goal_tree)
-                if len(filtered_msgs) < len(main_path_msgs):
+                compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
+                if len(compressed_msgs) < len(main_path_msgs):
                     logger.info(
                     logger.info(
                         "Level 1 压缩: %d -> %d 条消息",
                         "Level 1 压缩: %d -> %d 条消息",
-                        len(main_path_msgs), len(filtered_msgs),
+                        len(main_path_msgs), len(compressed_msgs),
                     )
                     )
-                    history = [msg.to_llm_dict() for msg in filtered_msgs]
+                    history = [msg.to_llm_dict() for msg in compressed_msgs]
                 else:
                 else:
                     logger.info(
                     logger.info(
                         "Level 1 压缩: 无可过滤消息 (%d 条全部保留)",
                         "Level 1 压缩: 无可过滤消息 (%d 条全部保留)",
@@ -807,7 +809,7 @@ class AgentRunner:
                     )
                     )
         elif needs_compression:
         elif needs_compression:
             logger.warning(
             logger.warning(
-                "消息数 (%d) 或 token 数 (%d) 超过阈值,但无法执行 Level 1 压缩(缺少 store 或 goal_tree)",
+                "消息数 (%d) 或 token 数 (%d) 超过阈值,但无法执行 Level 1 压缩(缺少 store 或 goal_tree,或 goal_compression=none)",
                 msg_count, token_count,
                 msg_count, token_count,
             )
             )
 
 
@@ -853,15 +855,15 @@ class AgentRunner:
         history: List[Dict],
         history: List[Dict],
         goal_tree: Optional[GoalTree],
         goal_tree: Optional[GoalTree],
         config: RunConfig,
         config: RunConfig,
-        sequence: int,
-        start_head_seq: int,
-    ) -> Tuple[List[Dict], int, int]:
-        """单次 LLM 调用压缩(fallback 方案)"""
+    ) -> str:
+        """单次 LLM 调用生成压缩摘要,返回 summary 文本"""
 
 
-        logger.info("执行单次 LLM 压缩(fallback)")
+        logger.info("执行单次 LLM 压缩")
 
 
-        # 构建压缩 prompt
-        compress_prompt = build_compression_prompt(goal_tree)
+        # 构建压缩 prompt(使用 SINGLE_TURN_PROMPT)
+        from agent.core.prompts import build_single_turn_prompt
+        goal_prompt = goal_tree.to_prompt(include_summary=True) if goal_tree else ""
+        compress_prompt = build_single_turn_prompt(goal_prompt)
         compress_messages = list(history) + [
         compress_messages = list(history) + [
             {"role": "user", "content": compress_prompt}
             {"role": "user", "content": compress_prompt}
         ]
         ]
@@ -888,32 +890,7 @@ class AgentRunner:
                 summary_text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
                 summary_text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
             ].strip()
             ].strip()
 
 
-        if not summary_text:
-            logger.warning("单次压缩未返回有效内容,跳过压缩")
-            return history, start_head_seq, sequence
-
-        # 创建 summary 消息
-        from agent.core.prompts import build_summary_header
-        summary_msg = Message.create(
-            trace_id=trace_id,
-            role="user",
-            sequence=sequence,
-            parent_sequence=start_head_seq,
-            branch_type=None,  # 主路径
-            content=build_summary_header(summary_text),
-        )
-
-        if self.trace_store:
-            await self.trace_store.add_message(summary_msg)
-
-        new_history = self._rebuild_history_after_compression(
-            history, summary_msg.to_llm_dict(), label="单次压缩"
-        )
-
-        new_head_seq = sequence
-        sequence += 1
-
-        return new_history, new_head_seq, sequence
+        return summary_text
 
 
     async def _agent_loop(
     async def _agent_loop(
         self,
         self,
@@ -937,13 +914,14 @@ class AgentRunner:
         if trace.context.get("active_side_branch"):
         if trace.context.get("active_side_branch"):
             side_branch_data = trace.context["active_side_branch"]
             side_branch_data = trace.context["active_side_branch"]
             branch_id = side_branch_data["branch_id"]
             branch_id = side_branch_data["branch_id"]
+            start_sequence = side_branch_data["start_sequence"]
 
 
-            # 从数据库查询侧分支消息
+            # 从数据库查询侧分支消息(按 sequence 范围)
             if self.trace_store:
             if self.trace_store:
                 all_messages = await self.trace_store.get_trace_messages(trace_id)
                 all_messages = await self.trace_store.get_trace_messages(trace_id)
                 side_messages = [
                 side_messages = [
                     m for m in all_messages
                     m for m in all_messages
-                    if m.branch_id == branch_id
+                    if m.sequence >= start_sequence
                 ]
                 ]
 
 
                 # 恢复侧分支上下文
                 # 恢复侧分支上下文
@@ -969,6 +947,8 @@ class AgentRunner:
                 # 重新计算 start_history_length
                 # 重新计算 start_history_length
                 side_branch_ctx.start_history_length = len(history) - len(side_messages)
                 side_branch_ctx.start_history_length = len(history) - len(side_messages)
 
 
+        break_after_side_branch = False  # 侧分支退出后是否 break 主循环
+
         for iteration in range(config.max_iterations):
         for iteration in range(config.max_iterations):
             # 更新活动时间(表明trace正在活跃运行)
             # 更新活动时间(表明trace正在活跃运行)
             if self.trace_store:
             if self.trace_store:
@@ -1002,7 +982,11 @@ class AgentRunner:
             # Context 管理(仅主路径)
             # Context 管理(仅主路径)
             needs_enter_side_branch = False
             needs_enter_side_branch = False
             if not side_branch_ctx:
             if not side_branch_ctx:
-                # 检查是否强制进入侧分支(API 手动触发)
+                # 侧分支退出后需要 break 主循环
+                if break_after_side_branch and not config.force_side_branch:
+                    break
+
+                # 检查是否强制进入侧分支(API 手动触发或自动压缩流程)
                 if config.force_side_branch:
                 if config.force_side_branch:
                     needs_enter_side_branch = True
                     needs_enter_side_branch = True
                     logger.info(f"强制进入侧分支: {config.force_side_branch}")
                     logger.info(f"强制进入侧分支: {config.force_side_branch}")
@@ -1198,118 +1182,78 @@ class AgentRunner:
             if side_branch_ctx:
             if side_branch_ctx:
                 # 计算侧分支已执行的轮次
                 # 计算侧分支已执行的轮次
                 turns_in_branch = iteration - side_branch_ctx.start_iteration
                 turns_in_branch = iteration - side_branch_ctx.start_iteration
+                should_exit = turns_in_branch >= side_branch_ctx.max_turns or not tool_calls
 
 
-                # 检查是否达到最大轮次
                 if turns_in_branch >= side_branch_ctx.max_turns:
                 if turns_in_branch >= side_branch_ctx.max_turns:
                     logger.warning(
                     logger.warning(
                         f"侧分支 {side_branch_ctx.type} 达到最大轮次 "
                         f"侧分支 {side_branch_ctx.type} 达到最大轮次 "
                         f"{side_branch_ctx.max_turns},强制退出"
                         f"{side_branch_ctx.max_turns},强制退出"
                     )
                     )
 
 
-                    if side_branch_ctx.type == "compression":
-                        # 压缩侧分支:fallback 到单次 LLM 调用
-                        logger.info("Fallback 到单次 LLM 压缩")
-
-                        # 清除侧分支状态
-                        trace.context.pop("active_side_branch", None)
-                        if self.trace_store:
-                            await self.trace_store.update_trace(
-                                trace_id, context=trace.context
-                            )
-
-                        # 恢复到侧分支开始前的 history
-                        if self.trace_store:
-                            main_path_messages = await self.trace_store.get_main_path_messages(
-                                trace_id, side_branch_ctx.start_head_seq
-                            )
-                            history = [m.to_llm_dict() for m in main_path_messages]
-
-                        # 执行单次 LLM 压缩
-                        history, head_seq, sequence = await self._single_turn_compress(
-                            trace_id, history, goal_tree, config, sequence,
-                            side_branch_ctx.start_head_seq
+                if should_exit and side_branch_ctx.type == "compression":
+                    # === 压缩侧分支退出(超时 + 正常完成统一处理)===
+                    summary_text = ""
+
+                    # 1. 从当前回复提取
+                    if response_content:
+                        if "[[SUMMARY]]" in response_content:
+                            summary_text = response_content[
+                                response_content.index("[[SUMMARY]]") + len("[[SUMMARY]]"):
+                            ].strip()
+                        elif response_content.strip():
+                            summary_text = response_content.strip()
+
+                    # 2. 从持久化存储按 sequence 范围查询
+                    if not summary_text and self.trace_store:
+                        all_messages = await self.trace_store.get_trace_messages(trace_id)
+                        side_messages = [
+                            m for m in all_messages
+                            if m.sequence >= side_branch_ctx.start_sequence
+                        ]
+                        for msg in reversed(side_messages):
+                            if msg.role == "assistant" and isinstance(msg.content, dict):
+                                text = msg.content.get("text", "")
+                                if "[[SUMMARY]]" in text:
+                                    summary_text = text[text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):].strip()
+                                    break
+                                elif text:
+                                    summary_text = text
+                                    break
+
+                    # 3. 单次 LLM 调用
+                    if not summary_text:
+                        logger.warning("侧分支未生成有效 summary,fallback 到单次 LLM 压缩")
+                        pre_branch_history = history[:side_branch_ctx.start_history_length]
+                        summary_text = await self._single_turn_compress(
+                            trace_id, pre_branch_history, goal_tree, config,
                         )
                         )
 
 
-                        # 清除强制侧分支配置
-                        config.force_side_branch = None
-
-                        side_branch_ctx = None
-                        continue
-
-                    elif side_branch_ctx.type == "reflection":
-                        # 反思侧分支:直接退出,不管结果
-                        logger.info("反思侧分支超时,直接退出")
-
-                        # 清除侧分支状态
-                        trace.context.pop("active_side_branch", None)
-
-                        # 队列中如果还有侧分支,保持 force_side_branch;否则清空
-                        if not config.force_side_branch or len(config.force_side_branch) == 0:
-                            config.force_side_branch = None
-                            logger.info("反思超时,队列为空")
-
-                        if self.trace_store:
-                            await self.trace_store.update_trace(
-                                trace_id, context=trace.context
-                            )
-
-                        # 恢复到侧分支开始前的 history
-                        if self.trace_store:
-                            main_path_messages = await self.trace_store.get_main_path_messages(
-                                trace_id, side_branch_ctx.start_head_seq
-                            )
-                            history = [m.to_llm_dict() for m in main_path_messages]
-                            head_seq = side_branch_ctx.start_head_seq
-
-                        # 清除强制侧分支配置
-                        config.force_side_branch = None
-
-                        side_branch_ctx = None
-                        continue
-
-                # 检查是否无工具调用(侧分支完成)
-                if not tool_calls:
-                    logger.info(f"侧分支 {side_branch_ctx.type} 完成(无工具调用)")
-
-                    # 提取结果
-                    if side_branch_ctx.type == "compression":
-                        # 从数据库查询侧分支消息并提取 summary
-                        summary_text = ""
-                        if self.trace_store:
-                            all_messages = await self.trace_store.get_trace_messages(trace_id)
-                            side_messages = [
-                                m for m in all_messages
-                                if m.branch_id == side_branch_ctx.branch_id
-                            ]
-
-                            for msg in side_messages:
-                                if msg.role == "assistant" and isinstance(msg.content, dict):
-                                    text = msg.content.get("text", "")
-                                    if "[[SUMMARY]]" in text:
-                                        summary_text = text[text.index("[[SUMMARY]]") + len("[[SUMMARY]]"):].strip()
-                                        break
-                                    elif text:
-                                        summary_text = text
-
-                        if not summary_text:
-                            logger.warning("侧分支未生成有效 summary,使用默认")
-                            summary_text = "压缩完成"
-
-                        # 创建主路径的 summary 消息(末尾追加详细 GoalTree)
+                    # 创建主路径 summary 消息并重建 history
+                    if summary_text:
                         from agent.core.prompts import build_summary_header
                         from agent.core.prompts import build_summary_header
                         summary_content = build_summary_header(summary_text)
                         summary_content = build_summary_header(summary_text)
 
 
-                        # 追加详细 GoalTree(压缩后立即注入)
                         if goal_tree and goal_tree.goals:
                         if goal_tree and goal_tree.goals:
                             goal_tree_detail = goal_tree.to_prompt(include_summary=True)
                             goal_tree_detail = goal_tree.to_prompt(include_summary=True)
                             summary_content += f"\n\n## Current Plan\n\n{goal_tree_detail}"
                             summary_content += f"\n\n## Current Plan\n\n{goal_tree_detail}"
 
 
+                        # 找第一条 user message 的 sequence 作为 parent
+                        # 续跑时 get_main_path_messages 沿 parent 链回溯,
+                        # 指向 first_user 可以跳过所有被压缩的中间消息
+                        first_user_seq = None
+                        if self.trace_store:
+                            all_msgs = await self.trace_store.get_trace_messages(trace_id)
+                            for m in all_msgs:
+                                if m.role == "user":
+                                    first_user_seq = m.sequence
+                                    break
+
                         summary_msg = Message.create(
                         summary_msg = Message.create(
                             trace_id=trace_id,
                             trace_id=trace_id,
                             role="user",
                             role="user",
                             sequence=sequence,
                             sequence=sequence,
-                            parent_sequence=side_branch_ctx.start_head_seq,
-                            branch_type=None,  # 回到主路径
+                            parent_sequence=first_user_seq,
+                            branch_type=None,
                             content=summary_content,
                             content=summary_content,
                         )
                         )
 
 
@@ -1319,41 +1263,42 @@ class AgentRunner:
                         history = self._rebuild_history_after_compression(
                         history = self._rebuild_history_after_compression(
                             history, summary_msg.to_llm_dict(), label="压缩侧分支"
                             history, summary_msg.to_llm_dict(), label="压缩侧分支"
                         )
                         )
-
                         head_seq = sequence
                         head_seq = sequence
                         sequence += 1
                         sequence += 1
+                    else:
+                        logger.error("所有压缩方案均未生成有效 summary,跳过压缩")
 
 
-                        # 清除侧分支队列
-                        config.force_side_branch = None
-
-                    elif side_branch_ctx.type == "reflection":
-                        # 反思侧分支:直接恢复主路径
-                        logger.info("反思侧分支完成")
+                    # 清理
+                    trace.context.pop("active_side_branch", None)
+                    config.force_side_branch = None
+                    if self.trace_store:
+                        await self.trace_store.update_trace(
+                            trace_id, context=trace.context, head_sequence=head_seq,
+                        )
+                    side_branch_ctx = None
+                    continue
 
 
-                        if self.trace_store:
-                            main_path_messages = await self.trace_store.get_main_path_messages(
-                                trace_id, side_branch_ctx.start_head_seq
-                            )
-                            history = [m.to_llm_dict() for m in main_path_messages]
-                            head_seq = side_branch_ctx.start_head_seq
+                elif should_exit and side_branch_ctx.type == "reflection":
+                    # === 反思侧分支退出(超时 + 正常完成统一处理)===
+                    logger.info("反思侧分支退出")
 
 
-                        # 队列中如果还有侧分支,保持 force_side_branch;否则清空
-                        if not config.force_side_branch or len(config.force_side_branch) == 0:
-                            config.force_side_branch = None
-                            logger.info("反思完成,队列为空")
+                    # 恢复主路径
+                    if self.trace_store:
+                        main_path_messages = await self.trace_store.get_main_path_messages(
+                            trace_id, side_branch_ctx.start_head_seq
+                        )
+                        history = [m.to_llm_dict() for m in main_path_messages]
+                        head_seq = side_branch_ctx.start_head_seq
 
 
-                    # 清除侧分支状态
+                    # 清
                     trace.context.pop("active_side_branch", None)
                     trace.context.pop("active_side_branch", None)
+                    if not config.force_side_branch or len(config.force_side_branch) == 0:
+                        config.force_side_branch = None
+                        logger.info("反思完成,队列为空")
                     if self.trace_store:
                     if self.trace_store:
                         await self.trace_store.update_trace(
                         await self.trace_store.update_trace(
-                            trace_id,
-                            context=trace.context,
-                            head_sequence=head_seq,
+                            trace_id, context=trace.context, head_sequence=head_seq,
                         )
                         )
-
-                    # 注意:不在这里清除 force_side_branch,因为反思侧分支可能已经设置了下一个侧分支
-                    # force_side_branch 的清除由各个分支类型自己处理
-
                     side_branch_ctx = None
                     side_branch_ctx = None
                     continue
                     continue
 
 
@@ -1518,14 +1463,50 @@ class AgentRunner:
                         "content": tool_content_for_llm,
                         "content": tool_content_for_llm,
                     })
                     })
 
 
+                # on_complete 模式:goal(done=...) 后立即压缩该 goal 的消息
+                if (
+                    not side_branch_ctx
+                    and config.goal_compression == "on_complete"
+                    and self.trace_store
+                    and goal_tree
+                ):
+                    has_goal_done = False
+                    for tc in tool_calls:
+                        if tc["function"]["name"] != "goal":
+                            continue
+                        try:
+                            raw = tc["function"]["arguments"]
+                            args = json.loads(raw) if isinstance(raw, str) and raw.strip() else {}
+                        except (json.JSONDecodeError, TypeError):
+                            args = {}
+                        if args.get("done") is not None:
+                            has_goal_done = True
+                            break
+
+                    if has_goal_done:
+                        main_path_msgs = await self.trace_store.get_main_path_messages(
+                            trace_id, head_seq
+                        )
+                        compressed_msgs = compress_completed_goals(main_path_msgs, goal_tree)
+                        if len(compressed_msgs) < len(main_path_msgs):
+                            logger.info(
+                                "on_complete 压缩: %d -> %d 条消息",
+                                len(main_path_msgs), len(compressed_msgs),
+                            )
+                            history = [msg.to_llm_dict() for msg in compressed_msgs]
+
                 continue  # 继续循环
                 continue  # 继续循环
 
 
-            # 无工具调用,任务完成
-            break
+            # 无工具调用
+            # 如果在侧分支中,已经在上面处理过了(不会走到这里)
+            # 主路径无工具调用 → 任务完成,检查是否需要完成后反思
+            if not side_branch_ctx and config.knowledge.enable_completion_extraction and not break_after_side_branch:
+                config.force_side_branch = ["reflection"]
+                break_after_side_branch = True
+                logger.info("任务完成,进入完成后反思侧分支")
+                continue
 
 
-        # 任务完成后复盘提取
-        if config.knowledge.enable_completion_extraction:
-            await self._extract_knowledge_on_completion(trace_id, history, config)
+            break
 
 
         # 清理 trace 相关的跟踪数据
         # 清理 trace 相关的跟踪数据
         self._context_warned.pop(trace_id, None)
         self._context_warned.pop(trace_id, None)
@@ -1594,95 +1575,6 @@ class AgentRunner:
 
 
         return new_history
         return new_history
 
 
-    async def _run_reflect(
-        self,
-        trace_id: str,
-        history: List[Dict],
-        config: RunConfig,
-        reflect_prompt: str,
-        source_name: str,
-    ) -> None:
-        """
-        执行反思提取:LLM 对历史消息进行反思,直接调用 knowledge_save 工具保存经验。
-
-        Args:
-            trace_id: Trace ID(作为知识的 message_id)
-            history: 当前对话历史
-            config: 运行配置
-            reflect_prompt: 反思 prompt
-            source_name: 来源名称(用于区分压缩时/完成时)
-        """
-        try:
-            reflect_messages = list(history) + [{"role": "user", "content": reflect_prompt}]
-            reflect_messages = self._add_cache_control(
-                reflect_messages, config.model, config.enable_prompt_caching
-            )
-
-            # 只暴露 knowledge_save 工具,让 LLM 直接调用
-            knowledge_save_schema = self._get_tool_schemas(["knowledge_save"])
-
-            reflect_result = await self.llm_call(
-                messages=reflect_messages,
-                model=config.model,
-                tools=knowledge_save_schema,
-                temperature=0.2,
-                **config.extra_llm_params,
-            )
-
-            tool_calls = reflect_result.get("tool_calls") or []
-            if not tool_calls:
-                logger.info("反思阶段无经验保存 (source=%s)", source_name)
-                return
-
-            saved_count = 0
-            for tc in tool_calls:
-                tool_name = tc.get("function", {}).get("name")
-                if tool_name != "knowledge_save":
-                    continue
-
-                tool_args = tc.get("function", {}).get("arguments") or {}
-                if isinstance(tool_args, str):
-                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
-
-                # 注入来源信息(LLM 不需要填写这些字段)
-                tool_args.setdefault("source_name", source_name)
-                tool_args.setdefault("source_category", "exp")
-                tool_args.setdefault("message_id", trace_id)
-
-                try:
-                    await self.tools.execute(
-                        "knowledge_save",
-                        tool_args,
-                        uid=config.uid or "",
-                        context={
-                            "store": self.trace_store,
-                            "trace_id": trace_id,
-                            "knowledge_config": config.knowledge,
-                        },
-                    )
-                    saved_count += 1
-                except Exception as e:
-                    logger.warning("保存经验失败: %s", e)
-
-            logger.info("已提取并保存 %d 条经验 (source=%s)", saved_count, source_name)
-
-        except Exception as e:
-            logger.error("知识反思提取失败 (source=%s): %s", source_name, e)
-
-    async def _extract_knowledge_on_completion(
-        self,
-        trace_id: str,
-        history: List[Dict],
-        config: RunConfig,
-    ) -> None:
-        """任务完成后执行全局复盘,提取经验保存到知识库。"""
-        logger.info("任务完成后复盘提取: trace=%s", trace_id)
-        await self._run_reflect(
-            trace_id, history, config,
-            reflect_prompt=config.knowledge.get_completion_reflect_prompt(),
-            source_name="completion_reflection",
-        )
-
     # ===== 回溯(Rewind)=====
     # ===== 回溯(Rewind)=====
 
 
     async def _rewind(
     async def _rewind(

+ 73 - 13
agent/docs/architecture.md

@@ -1242,26 +1242,86 @@ async def get_experience(
 
 
 ## Context 压缩
 ## Context 压缩
 
 
-### 两级压缩策略
+### 压缩策略概述
 
 
-#### Level 1:GoalTree 过滤(确定性,零成本)
+Context 压缩分为两级,通过 `RunConfig` 中的 `goal_compression` 参数控制 Level 1 的行为:
 
 
-每轮 agent loop 构建 `llm_messages` 时自动执行:
-- 始终保留:system prompt、第一条 user message(含 GoalTree 精简视图)、当前 focus goal 的消息
-- 跳过 completed/abandoned goals 的消息(信息已在 GoalTree summary 中)
-- 通过 Message Tree 的 parent_sequence 实现跳过
+| 模式 | 值 | Level 1 行为 | Level 2 行为 |
+|------|-----|-------------|-------------|
+| 不压缩 | `"none"` | 跳过 Level 1 | 超限时直接进入 Level 2 |
+| 完成后压缩 | `"on_complete"` | 每个 goal 完成时立刻压缩该 goal 的消息 | 超限时进入 Level 2 |
+| 超长时压缩 | `"on_overflow"` | 超限时遍历所有 completed goal 逐个压缩 | Level 1 后仍超限则进入 Level 2 |
 
 
-大多数情况下 Level 1 足够。
+默认值:`"on_overflow"`
 
 
-#### Level 2:LLM 总结(仅在 Level 1 后仍超限时触发)
+```python
+RunConfig(
+    goal_compression="on_overflow",  # "none" | "on_complete" | "on_overflow"
+)
+```
+
+### Level 1:Goal 完成压缩(确定性,零 LLM 成本)
+
+对单个 completed goal 的压缩逻辑:
+
+1. **识别目标消息**:找到该 goal 关联的所有消息(`msg.goal_id == goal.id`)
+2. **区分 goal 工具消息和非 goal 消息**:检查 assistant 消息的 tool_calls 中是否调用了 `goal` 工具(实际场景中 goal 调用通常是单独一条 assistant 消息,不考虑混合情况)
+3. **保留 goal 工具消息**:保留所有调用 `goal(...)` 的 assistant 消息及其对应的 tool result(包括 add、focus、under、done 等操作)
+4. **删除非 goal 消息**:从 history 中移除该 goal 的其他 assistant 消息及其 tool result(read_file、bash、search 等中间工具调用)
+5. **替换 done 的 tool result**:将 `goal(done=...)` 的 tool result 内容替换为:"具体执行过程已清理"
+6. **纯内存操作**:压缩仅操作内存中的 history 列表,不涉及新增消息或持久化变更,原始消息永远保留在存储层
+
+压缩后的 history 片段示例:
+
+```
+... (前面的消息)
+[assistant] tool_calls: [goal(focus="1.1")]
+[tool] goal result: "## 更新\n- 焦点切换到: 1.1\n\n## Current Plan\n..."
+[assistant] tool_calls: [goal(done="1.1", summary="前端使用 React...")]
+[tool] goal result: "具体执行过程已清理"
+... (后面的消息)
+```
+
+#### `on_complete` 模式
+
+在 goal 工具执行 `done` 操作后,立刻对该 goal 执行压缩。优点是 history 始终保持精简,缺点是如果后续需要回溯到该 goal 的中间过程,信息已丢失(存储层仍保留原始消息)。
+
+**触发点**:`agent/trace/goal_tool.py` 中 done 操作完成后
+
+#### `on_overflow` 模式
+
+在 `_manage_context_usage` 检测到超限时,遍历所有 completed goal(按完成时间排序),逐个执行压缩,直到 token 数降到阈值以下或所有 completed goal 都已压缩。如果仍超限,进入 Level 2。
+
+**触发点**:`agent/core/runner.py:_manage_context_usage`
+
+**实现**:`agent/trace/compaction.py:compress_completed_goal`, `agent/trace/compaction.py:compress_all_completed_goals`
+
+### Level 2:LLM 总结(仅在 Level 1 后仍超限时触发)
+
+触发条件:Level 1 之后 token 数仍超过阈值(默认 `context_window × 0.5`)。
+
+通过侧分支队列机制执行,`force_side_branch` 为列表类型:
+
+1. **反思**(可选,由 `knowledge.enable_extraction` 控制):进入 `reflection` 侧分支,LLM 可多轮调用 knowledge_search、resource_save、knowledge_save 等工具提取经验
+2. **压缩**:进入 `compression` 侧分支,LLM 生成 summary
+
+侧分支队列示例:
+- 启用知识提取:`force_side_branch = ["reflection", "compression"]`
+- 仅压缩:`force_side_branch = ["compression"]`
+
+压缩完成后重建 history 为:`system prompt + 第一条 user message + summary(含详细 GoalTree)`
+
+**实现**:`agent/core/runner.py:_agent_loop`(侧分支状态机), `agent/core/runner.py:_rebuild_history_after_compression`
+
+### 任务完成后反思
 
 
-触发条件:Level 1 之后 token 数仍超过阈值(默认 `max_tokens × 0.8`)。
+主路径无工具调用(任务完成)时,如果 `knowledge.enable_completion_extraction` 为 True,通过侧分支机制进入反思:
 
 
-流程:
-1. **经验提取**:在消息列表末尾追加反思 prompt,进入侧分支 agent 模式(最多 5 轮),LLM 可调用工具(如 knowledge_search, knowledge_save)进行多轮推理。反思消息标记为 `branch_type="reflection"`,不在主路径上
-2. **压缩**:在消息列表末尾追加压缩 prompt(含 GoalTree 完整视图),进入侧分支 agent 模式(最多 5 轮),LLM 可调用工具(如 goal_status)辅助压缩。压缩消息标记为 `branch_type="compression"`,完成后创建 summary 消息,其 `parent_sequence` 跳过被压缩的范围
+1. 设置 `force_side_branch = ["reflection"]` 和 `break_after_side_branch = True`
+2. 反思侧分支完成后回到主路径
+3. 检测到 `break_after_side_branch` 标志,直接 break 退出循环
 
 
-**侧分支模式**:压缩和反思在同一 agent loop 中通过状态机实现,复用主路径的缓存和工具配置,支持多轮推理。
+**实现**:`agent/core/runner.py:_agent_loop`
 
 
 ### GoalTree 双视图
 ### GoalTree 双视图
 
 

+ 2 - 2
agent/tools/builtin/knowledge.py

@@ -41,7 +41,7 @@ class KnowledgeConfig:
     default_tags: Optional[Dict[str, str]] = None      # 默认 tags(会与工具调用参数合并)
     default_tags: Optional[Dict[str, str]] = None      # 默认 tags(会与工具调用参数合并)
     default_scopes: Optional[List[str]] = None         # 默认 scopes(空则用 ["org:cybertogether"])
     default_scopes: Optional[List[str]] = None         # 默认 scopes(空则用 ["org:cybertogether"])
     default_search_types: Optional[List[str]] = None   # 默认搜索类型过滤
     default_search_types: Optional[List[str]] = None   # 默认搜索类型过滤
-    default_search_owner: str = ""                     # 默认搜索 owner 过滤(空则不过滤)
+    default_search_owner: str = ""                     # 默认搜索 owner 过滤(空则不过滤,支持多个owner用逗号分隔,如 "user1@example.com,user2@example.com")
 
 
     def get_reflect_prompt(self) -> str:
     def get_reflect_prompt(self) -> str:
         """压缩时反思 prompt"""
         """压缩时反思 prompt"""
@@ -106,7 +106,7 @@ async def knowledge_search(
         top_k: 返回数量(默认 5)
         top_k: 返回数量(默认 5)
         min_score: 最低评分过滤(默认 3)
         min_score: 最低评分过滤(默认 3)
         types: 按类型过滤(user_profile/strategy/tool/usecase/definition/plan)
         types: 按类型过滤(user_profile/strategy/tool/usecase/definition/plan)
-        owner: 按所有者过滤(可选)
+        owner: 按所有者过滤(可选,支持多个owner用逗号分隔的字符串,如 "user1@example.com,user2@example.com")
         context: 工具上下文
         context: 工具上下文
 
 
     Returns:
     Returns:

+ 93 - 64
agent/trace/compaction.py

@@ -1,17 +1,19 @@
 """
 """
 Context 压缩 — 两级压缩策略
 Context 压缩 — 两级压缩策略
 
 
-Level 1: GoalTree 过滤(确定性,零成本)
-  - 跳过 completed/abandoned goals 的消息(信息已在 GoalTree summary 中)
-  - 始终保留:system prompt、第一条 user message、当前 focus goal 的消息
+Level 1: Goal 完成压缩(确定性,零 LLM 成本)
+  - 对 completed/abandoned goals:保留 goal 工具消息,移除非 goal 工具消息
+  - 三种模式:none / on_complete / on_overflow
 
 
 Level 2: LLM 总结(仅在 Level 1 后仍超限时触发)
 Level 2: LLM 总结(仅在 Level 1 后仍超限时触发)
-  - 在消息列表末尾追加压缩 prompt → 主模型回复 → summary 存为新消息
-  - summary 的 parent_sequence 跳过被压缩的范围
+  - 通过侧分支多轮 agent 模式压缩
+  - 压缩后重建 history 为:system prompt + 第一条 user message + summary
 
 
-压缩不修改存储:原始消息永远保留在 messages/,通过 parent_sequence 树结构实现跳过
+压缩不修改存储:原始消息永远保留在 messages/,纯内存操作
 """
 """
 
 
+import copy
+import json
 import logging
 import logging
 from dataclasses import dataclass
 from dataclasses import dataclass
 from typing import List, Dict, Any, Optional, Set
 from typing import List, Dict, Any, Optional, Set
@@ -19,7 +21,6 @@ from typing import List, Dict, Any, Optional, Set
 from .goal_models import GoalTree
 from .goal_models import GoalTree
 from .models import Message
 from .models import Message
 from agent.core.prompts import (
 from agent.core.prompts import (
-    COMPRESSION_EVAL_PROMPT_TEMPLATE,
     REFLECT_PROMPT,
     REFLECT_PROMPT,
     build_compression_eval_prompt,
     build_compression_eval_prompt,
 )
 )
@@ -99,84 +100,112 @@ class CompressionConfig:
         return int(window * self.threshold_ratio)
         return int(window * self.threshold_ratio)
 
 
 
 
-# ===== Level 1: GoalTree 过滤 =====
+# ===== Level 1: Goal 完成压缩 =====
 
 
-def filter_by_goal_status(
+def compress_completed_goals(
     messages: List[Message],
     messages: List[Message],
     goal_tree: Optional[GoalTree],
     goal_tree: Optional[GoalTree],
 ) -> List[Message]:
 ) -> List[Message]:
     """
     """
-    Level 1 过滤:跳过 completed/abandoned goals 的消息
+    Level 1 压缩:移除 completed/abandoned goals 的非 goal 工具消息
 
 
-    始终保留:
-    - goal_id 为 None 的消息(system prompt、初始 user message)
-    - 当前 focus goal 及其祖先链上的消息
-    - in_progress 和 pending goals 的消息
+    对每个 completed/abandoned goal:
+    - 保留:所有调用 goal 工具的 assistant 消息及其 tool result
+    - 移除:所有非 goal 工具的 assistant 消息及其 tool result
+    - 替换:goal(done=...) 的 tool result 内容为 "具体执行过程已清理"
+    - goal_id 为 None 的消息始终保留(system prompt、初始 user message)
+    - pending / in_progress goals 的消息不受影响
 
 
-    跳过:
-    - completed 且不在焦点路径上的 goals 的消息
-    - abandoned goals 的消息
+    纯内存操作,不修改原始 Message 对象,不涉及持久化。
 
 
     Args:
     Args:
-        messages: 主路径上的有序消息列表
+        messages: 主路径上的有序消息列表(Message 对象)
         goal_tree: GoalTree 实例
         goal_tree: GoalTree 实例
 
 
     Returns:
     Returns:
-        过滤后的消息列表
+        压缩后的消息列表
     """
     """
     if not goal_tree or not goal_tree.goals:
     if not goal_tree or not goal_tree.goals:
         return messages
         return messages
 
 
-    # 构建焦点路径(当前焦点 + 父链 + 直接子节点)
-    focus_path = _get_focus_path(goal_tree)
+    # 收集 completed/abandoned goal IDs
+    completed_ids: Set[str] = {
+        g.id for g in goal_tree.goals
+        if g.status in ("completed", "abandoned")
+    }
+    if not completed_ids:
+        return messages
 
 
-    # 构建需要跳过的 goal IDs
-    skip_goal_ids: Set[str] = set()
-    for goal in goal_tree.goals:
-        if goal.id in focus_path:
-            continue  # 焦点路径上的 goal 始终保留
-        if goal.status in ("completed", "abandoned"):
-            skip_goal_ids.add(goal.id)
+    # Pass 1: 扫描 assistant 消息,分类 tool_call_ids
+    remove_seqs: Set[int] = set()       # 要移除的 assistant 消息 sequence
+    remove_tc_ids: Set[str] = set()     # 要移除的 tool result 的 tool_call_id
+    done_tc_ids: Set[str] = set()       # goal(done=...) 的 tool_call_id(替换 tool result)
 
 
-    # 过滤消息
-    result = []
     for msg in messages:
     for msg in messages:
-        if msg.goal_id is None:
-            result.append(msg)  # 无 goal 的消息始终保留
-        elif msg.goal_id not in skip_goal_ids:
-            result.append(msg)  # 不在跳过列表中的消息保留
-
-    return result
-
-
-def _get_focus_path(goal_tree: GoalTree) -> Set[str]:
-    """
-    获取焦点路径上需要保留消息的 goal IDs
-
-    保留:焦点自身 + 父链 + 未完成的直接子节点
-    不保留:已完成/已放弃的直接子节点(信息已在 goal.summary 中)
-    """
-    focus_ids: Set[str] = set()
-
-    if not goal_tree.current_id:
-        return focus_ids
-
-    # 焦点自身
-    focus_ids.add(goal_tree.current_id)
-
-    # 父链
-    goal = goal_tree.find(goal_tree.current_id)
-    while goal and goal.parent_id:
-        focus_ids.add(goal.parent_id)
-        goal = goal_tree.find(goal.parent_id)
+        if msg.goal_id not in completed_ids:
+            continue
+        if msg.role != "assistant":
+            continue
+
+        content = msg.content
+        tc_list = []
+        if isinstance(content, dict):
+            tc_list = content.get("tool_calls", [])
+
+        if not tc_list:
+            # 纯文本 assistant 消息(无工具调用),移除
+            remove_seqs.add(msg.sequence)
+            continue
+
+        # 检查是否包含 goal 工具调用
+        has_goal_call = False
+        for tc in tc_list:
+            func_name = tc.get("function", {}).get("name", "")
+            if func_name == "goal":
+                has_goal_call = True
+                # 检查是否为 done 调用
+                args_str = tc.get("function", {}).get("arguments", "{}")
+                try:
+                    args = json.loads(args_str) if isinstance(args_str, str) else (args_str or {})
+                except json.JSONDecodeError:
+                    args = {}
+                if args.get("done") is not None:
+                    tc_id = tc.get("id")
+                    if tc_id:
+                        done_tc_ids.add(tc_id)
+
+        if not has_goal_call:
+            # 不含 goal 工具调用 → 移除整条 assistant 及其所有 tool results
+            remove_seqs.add(msg.sequence)
+            for tc in tc_list:
+                tc_id = tc.get("id")
+                if tc_id:
+                    remove_tc_ids.add(tc_id)
+
+    # 无需压缩
+    if not remove_seqs and not done_tc_ids:
+        return messages
 
 
-    # 直接子节点:仅保留未完成的(completed/abandoned 的信息已在 summary 中)
-    children = goal_tree.get_children(goal_tree.current_id)
-    for child in children:
-        if child.status not in ("completed", "abandoned"):
-            focus_ids.add(child.id)
+    # Pass 2: 构建结果
+    result: List[Message] = []
+    for msg in messages:
+        # 跳过标记移除的 assistant 消息
+        if msg.sequence in remove_seqs:
+            continue
+        # 跳过标记移除的 tool result
+        if msg.role == "tool" and msg.tool_call_id in remove_tc_ids:
+            continue
+
+        # 替换 done 的 tool result 内容
+        if msg.role == "tool" and msg.tool_call_id in done_tc_ids:
+            modified = copy.copy(msg)
+            modified.content = {"tool_name": "goal", "result": "具体执行过程已清理"}
+            result.append(modified)
+            continue
+
+        result.append(msg)
 
 
-    return focus_ids
+    return result
 
 
 
 
 # ===== Token 估算 =====
 # ===== Token 估算 =====

+ 197 - 0
agent/trace/upload_api.py

@@ -0,0 +1,197 @@
+"""
+Trace Upload API
+
+提供 Trace 压缩包上传和导入功能
+"""
+
+import os
+import shutil
+import tempfile
+import zipfile
+from typing import List, Dict, Any
+from fastapi import APIRouter, UploadFile, File, HTTPException
+from pydantic import BaseModel
+
+from .protocols import TraceStore
+
+
+router = APIRouter(prefix="/api/traces", tags=["traces"])
+
+
+# ===== Response 模型 =====
+
+
+class UploadResponse(BaseModel):
+    """上传响应"""
+    success: bool
+    message: str
+    imported_traces: List[str]
+    failed_traces: List[Dict[str, str]]
+
+
+# ===== 全局 TraceStore =====
+
+
+_trace_store: TraceStore | None = None
+
+
+def set_trace_store(store: TraceStore):
+    """设置 TraceStore 实例"""
+    global _trace_store
+    _trace_store = store
+
+
+def get_trace_store() -> TraceStore:
+    """获取 TraceStore 实例"""
+    if _trace_store is None:
+        raise RuntimeError("TraceStore not initialized")
+    return _trace_store
+
+
+# ===== 辅助函数 =====
+
+
+def is_valid_trace_folder(folder_path: str) -> bool:
+    """
+    验证是否是有效的 trace 文件夹
+
+    有效的 trace 文件夹应该包含:
+    - meta.json 文件
+    """
+    return os.path.isfile(os.path.join(folder_path, "meta.json"))
+
+
+def extract_and_import_traces(zip_path: str, base_trace_path: str) -> tuple[List[str], List[Dict[str, str]]]:
+    """
+    解压并导入 traces
+
+    Returns:
+        (imported_traces, failed_traces)
+    """
+    import logging
+    logger = logging.getLogger(__name__)
+
+    imported = []
+    failed = []
+
+    # 创建临时目录
+    with tempfile.TemporaryDirectory() as temp_dir:
+        try:
+            # 解压文件
+            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+                zip_ref.extractall(temp_dir)
+
+            logger.info(f"Extracted to temp dir: {temp_dir}")
+
+            # 收集所有有效的 trace 文件夹
+            valid_traces = []
+
+            # 遍历解压后的内容
+            for root, dirs, files in os.walk(temp_dir):
+                # 检查当前目录是否是 trace 文件夹
+                if is_valid_trace_folder(root):
+                    valid_traces.append(root)
+                    logger.info(f"Found valid trace folder: {root}")
+
+            if not valid_traces:
+                logger.warning(f"No valid traces found in {temp_dir}")
+                # 列出临时目录的内容用于调试
+                for root, dirs, files in os.walk(temp_dir):
+                    logger.info(f"Dir: {root}, Files: {files[:5]}")  # 只显示前5个文件
+
+            # 导入找到的 trace 文件夹
+            for trace_folder in valid_traces:
+                trace_folder_name = os.path.basename(trace_folder)
+                target_path = os.path.join(base_trace_path, trace_folder_name)
+
+                try:
+                    # 如果目标已存在,跳过
+                    if os.path.exists(target_path):
+                        failed.append({
+                            "trace_id": trace_folder_name,
+                            "reason": "Trace already exists"
+                        })
+                        logger.warning(f"Trace already exists: {trace_folder_name}")
+                        continue
+
+                    # 复制到目标目录
+                    shutil.copytree(trace_folder, target_path)
+                    imported.append(trace_folder_name)
+                    logger.info(f"Imported trace: {trace_folder_name}")
+
+                except Exception as e:
+                    failed.append({
+                        "trace_id": trace_folder_name,
+                        "reason": str(e)
+                    })
+                    logger.error(f"Failed to import {trace_folder_name}: {e}")
+
+        except zipfile.BadZipFile:
+            raise HTTPException(status_code=400, detail="Invalid zip file")
+        except Exception as e:
+            logger.error(f"Failed to extract zip: {e}")
+            raise HTTPException(status_code=500, detail=f"Failed to extract zip: {str(e)}")
+
+    return imported, failed
+
+
+# ===== 路由 =====
+
+
+@router.post("/upload", response_model=UploadResponse)
+async def upload_traces(file: UploadFile = File(...)):
+    """
+    上传 trace 压缩包并导入
+
+    支持的格式:.zip
+    压缩包可以包含:
+    - 单个 trace 文件夹
+    - 多个 trace 文件夹
+    - 嵌套的 trace 文件夹
+
+    Args:
+        file: 上传的压缩包文件
+    """
+    # 验证文件类型
+    if not file.filename or not file.filename.endswith('.zip'):
+        raise HTTPException(status_code=400, detail="Only .zip files are supported")
+
+    # 获取 trace 存储路径
+    store = get_trace_store()
+    # 假设 FileSystemTraceStore 有 base_path 属性
+    if not hasattr(store, 'base_path'):
+        raise HTTPException(status_code=500, detail="TraceStore does not support file system operations")
+
+    base_trace_path = store.base_path
+
+    # 保存上传的文件到临时位置
+    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as temp_file:
+        temp_file_path = temp_file.name
+        content = await file.read()
+        temp_file.write(content)
+
+    try:
+        # 解压并导入
+        imported, failed = extract_and_import_traces(temp_file_path, base_trace_path)
+
+        # 构建响应消息
+        if imported and not failed:
+            message = f"Successfully imported {len(imported)} trace(s)"
+        elif imported and failed:
+            message = f"Imported {len(imported)} trace(s), {len(failed)} failed"
+        elif not imported and failed:
+            message = f"Failed to import all traces"
+        else:
+            message = "No valid traces found in the zip file"
+
+        return UploadResponse(
+            success=len(imported) > 0,
+            message=message,
+            imported_traces=imported,
+            failed_traces=failed
+        )
+
+    finally:
+        # 清理临时文件
+        if os.path.exists(temp_file_path):
+            os.unlink(temp_file_path)

+ 5 - 0
api_server.py

@@ -21,6 +21,7 @@ from agent.trace.run_api import router as run_router, experiences_router, set_ru
 from agent.trace.websocket import router as ws_router, set_trace_store as set_ws_trace_store
 from agent.trace.websocket import router as ws_router, set_trace_store as set_ws_trace_store
 from agent.trace.examples_api import router as examples_router
 from agent.trace.examples_api import router as examples_router
 from agent.trace.logs_websocket import router as logs_router, setup_websocket_logging
 from agent.trace.logs_websocket import router as logs_router, setup_websocket_logging
+from agent.trace.upload_api import router as upload_router, set_trace_store as set_upload_trace_store
 
 
 
 
 # ===== 日志配置 =====
 # ===== 日志配置 =====
@@ -61,6 +62,7 @@ trace_store = FileSystemTraceStore(base_path=".trace")
 # 注入到 step_tree 模块
 # 注入到 step_tree 模块
 set_api_trace_store(trace_store)
 set_api_trace_store(trace_store)
 set_ws_trace_store(trace_store)
 set_ws_trace_store(trace_store)
+set_upload_trace_store(trace_store)
 
 
 
 
 # ===== 可选:配置 Runner(启用执行 API)=====
 # ===== 可选:配置 Runner(启用执行 API)=====
@@ -82,6 +84,9 @@ set_runner(runner)
 # Examples API(GET /api/examples)
 # Examples API(GET /api/examples)
 app.include_router(examples_router)
 app.include_router(examples_router)
 
 
+# Trace 上传 API(POST /api/traces/upload)
+app.include_router(upload_router)
+
 # Trace 执行 API(POST + GET /running,需配置 Runner)
 # Trace 执行 API(POST + GET /running,需配置 Runner)
 # 注意:run_router 必须在 api_router 之前注册,否则 GET /running 会被 /{trace_id} 捕获
 # 注意:run_router 必须在 api_router 之前注册,否则 GET /running 会被 /{trace_id} 捕获
 app.include_router(run_router)
 app.include_router(run_router)

+ 1 - 1
examples/research/config.py

@@ -36,7 +36,7 @@ RUN_CONFIG = RunConfig(
         default_tags={"project": "research", "domain": "ai_agent"},  # 默认 tags(会与工具调用参数合并)
         default_tags={"project": "research", "domain": "ai_agent"},  # 默认 tags(会与工具调用参数合并)
         default_scopes=["org:cybertogether"],  # 默认 scopes
         default_scopes=["org:cybertogether"],  # 默认 scopes
         default_search_types=[],  # 默认搜索类型过滤
         default_search_types=[],  # 默认搜索类型过滤
-        default_search_owner=""  # 默认搜索 owner 过滤(空则不过滤)
+        default_search_owner=""  # 默认搜索 owner 过滤(空则不过滤,支持多个owner用逗号分隔,如 "user1@example.com,user2@example.com")
     )
     )
 )
 )
 
 

+ 0 - 16
examples/research/knowledge/README.md

@@ -1,16 +0,0 @@
-# 新产品营销推广调研知识库
-
-本目录收集整理新产品面世时的营销推广策略、方法论和最佳实践。
-
-## 调研维度
-
-1. 权威资源和行业专家观点
-2. 成功案例分析
-3. 营销框架和方法论
-4. 不同渠道的推广策略
-
-## 更新日期
-2024年
-
----
-调研进行中...

+ 94 - 1
frontend/react-template/package-lock.json

@@ -12,6 +12,7 @@
         "@douyinfe/semi-ui": "^2.56.0",
         "@douyinfe/semi-ui": "^2.56.0",
         "axios": "^1.6.0",
         "axios": "^1.6.0",
         "d3": "^7.8.5",
         "d3": "^7.8.5",
+        "jszip": "^3.10.1",
         "react": "^18.2.0",
         "react": "^18.2.0",
         "react-dom": "^18.2.0",
         "react-dom": "^18.2.0",
         "react-error-boundary": "^6.1.1",
         "react-error-boundary": "^6.1.1",
@@ -4071,6 +4072,12 @@
         "url": "https://github.com/sponsors/sindresorhus"
         "url": "https://github.com/sponsors/sindresorhus"
       }
       }
     },
     },
+    "node_modules/core-util-is": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+      "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
+      "license": "MIT"
+    },
     "node_modules/crelt": {
     "node_modules/crelt": {
       "version": "1.0.6",
       "version": "1.0.6",
       "resolved": "https://registry.npmmirror.com/crelt/-/crelt-1.0.6.tgz",
       "resolved": "https://registry.npmmirror.com/crelt/-/crelt-1.0.6.tgz",
@@ -5801,6 +5808,12 @@
         "node": ">= 4"
         "node": ">= 4"
       }
       }
     },
     },
+    "node_modules/immediate": {
+      "version": "3.0.6",
+      "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.0.6.tgz",
+      "integrity": "sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==",
+      "license": "MIT"
+    },
     "node_modules/import-fresh": {
     "node_modules/import-fresh": {
       "version": "3.3.1",
       "version": "3.3.1",
       "resolved": "https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.1.tgz",
       "resolved": "https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.1.tgz",
@@ -5844,7 +5857,6 @@
       "version": "2.0.4",
       "version": "2.0.4",
       "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz",
       "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz",
       "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
       "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
-      "dev": true,
       "license": "ISC"
       "license": "ISC"
     },
     },
     "node_modules/inline-style-parser": {
     "node_modules/inline-style-parser": {
@@ -5968,6 +5980,12 @@
       "dev": true,
       "dev": true,
       "license": "MIT"
       "license": "MIT"
     },
     },
+    "node_modules/isarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+      "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
+      "license": "MIT"
+    },
     "node_modules/isexe": {
     "node_modules/isexe": {
       "version": "2.0.0",
       "version": "2.0.0",
       "resolved": "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz",
       "resolved": "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz",
@@ -6098,6 +6116,18 @@
       "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==",
       "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==",
       "license": "MIT"
       "license": "MIT"
     },
     },
+    "node_modules/jszip": {
+      "version": "3.10.1",
+      "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.10.1.tgz",
+      "integrity": "sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==",
+      "license": "(MIT OR GPL-3.0-or-later)",
+      "dependencies": {
+        "lie": "~3.3.0",
+        "pako": "~1.0.2",
+        "readable-stream": "~2.3.6",
+        "setimmediate": "^1.0.5"
+      }
+    },
     "node_modules/keyv": {
     "node_modules/keyv": {
       "version": "4.5.4",
       "version": "4.5.4",
       "resolved": "https://registry.npmmirror.com/keyv/-/keyv-4.5.4.tgz",
       "resolved": "https://registry.npmmirror.com/keyv/-/keyv-4.5.4.tgz",
@@ -6122,6 +6152,15 @@
         "node": ">= 0.8.0"
         "node": ">= 0.8.0"
       }
       }
     },
     },
+    "node_modules/lie": {
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/lie/-/lie-3.3.0.tgz",
+      "integrity": "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==",
+      "license": "MIT",
+      "dependencies": {
+        "immediate": "~3.0.5"
+      }
+    },
     "node_modules/lightningcss": {
     "node_modules/lightningcss": {
       "version": "1.30.2",
       "version": "1.30.2",
       "resolved": "https://registry.npmmirror.com/lightningcss/-/lightningcss-1.30.2.tgz",
       "resolved": "https://registry.npmmirror.com/lightningcss/-/lightningcss-1.30.2.tgz",
@@ -7767,6 +7806,12 @@
         "url": "https://github.com/sponsors/sindresorhus"
         "url": "https://github.com/sponsors/sindresorhus"
       }
       }
     },
     },
+    "node_modules/pako": {
+      "version": "1.0.11",
+      "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
+      "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==",
+      "license": "(MIT AND Zlib)"
+    },
     "node_modules/parent-module": {
     "node_modules/parent-module": {
       "version": "1.0.1",
       "version": "1.0.1",
       "resolved": "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz",
       "resolved": "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz",
@@ -7988,6 +8033,12 @@
         "node": ">=6"
         "node": ">=6"
       }
       }
     },
     },
+    "node_modules/process-nextick-args": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+      "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
+      "license": "MIT"
+    },
     "node_modules/prop-types": {
     "node_modules/prop-types": {
       "version": "15.8.1",
       "version": "15.8.1",
       "resolved": "https://registry.npmmirror.com/prop-types/-/prop-types-15.8.1.tgz",
       "resolved": "https://registry.npmmirror.com/prop-types/-/prop-types-15.8.1.tgz",
@@ -8372,6 +8423,21 @@
         "react-dom": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
         "react-dom": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
       }
       }
     },
     },
+    "node_modules/readable-stream": {
+      "version": "2.3.8",
+      "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
+      "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
+      "license": "MIT",
+      "dependencies": {
+        "core-util-is": "~1.0.0",
+        "inherits": "~2.0.3",
+        "isarray": "~1.0.0",
+        "process-nextick-args": "~2.0.0",
+        "safe-buffer": "~5.1.1",
+        "string_decoder": "~1.1.1",
+        "util-deprecate": "~1.0.1"
+      }
+    },
     "node_modules/recma-build-jsx": {
     "node_modules/recma-build-jsx": {
       "version": "1.0.0",
       "version": "1.0.0",
       "resolved": "https://registry.npmmirror.com/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz",
       "resolved": "https://registry.npmmirror.com/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz",
@@ -8669,6 +8735,12 @@
       "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==",
       "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==",
       "license": "BSD-3-Clause"
       "license": "BSD-3-Clause"
     },
     },
+    "node_modules/safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+      "license": "MIT"
+    },
     "node_modules/safer-buffer": {
     "node_modules/safer-buffer": {
       "version": "2.1.2",
       "version": "2.1.2",
       "resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
       "resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
@@ -8719,6 +8791,12 @@
         "node": ">=10"
         "node": ">=10"
       }
       }
     },
     },
+    "node_modules/setimmediate": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
+      "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==",
+      "license": "MIT"
+    },
     "node_modules/shebang-command": {
     "node_modules/shebang-command": {
       "version": "2.0.0",
       "version": "2.0.0",
       "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz",
       "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz",
@@ -8802,6 +8880,15 @@
       "dev": true,
       "dev": true,
       "license": "MIT"
       "license": "MIT"
     },
     },
+    "node_modules/string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "license": "MIT",
+      "dependencies": {
+        "safe-buffer": "~5.1.0"
+      }
+    },
     "node_modules/stringify-entities": {
     "node_modules/stringify-entities": {
       "version": "4.0.4",
       "version": "4.0.4",
       "resolved": "https://registry.npmmirror.com/stringify-entities/-/stringify-entities-4.0.4.tgz",
       "resolved": "https://registry.npmmirror.com/stringify-entities/-/stringify-entities-4.0.4.tgz",
@@ -9263,6 +9350,12 @@
         "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
         "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
       }
       }
     },
     },
+    "node_modules/util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
+      "license": "MIT"
+    },
     "node_modules/utility-types": {
     "node_modules/utility-types": {
       "version": "3.11.0",
       "version": "3.11.0",
       "resolved": "https://registry.npmmirror.com/utility-types/-/utility-types-3.11.0.tgz",
       "resolved": "https://registry.npmmirror.com/utility-types/-/utility-types-3.11.0.tgz",

+ 2 - 1
frontend/react-template/package.json

@@ -14,6 +14,7 @@
     "@douyinfe/semi-ui": "^2.56.0",
     "@douyinfe/semi-ui": "^2.56.0",
     "axios": "^1.6.0",
     "axios": "^1.6.0",
     "d3": "^7.8.5",
     "d3": "^7.8.5",
+    "jszip": "^3.10.1",
     "react": "^18.2.0",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
     "react-dom": "^18.2.0",
     "react-error-boundary": "^6.1.1",
     "react-error-boundary": "^6.1.1",
@@ -42,4 +43,4 @@
     "vite": "^5.0.8",
     "vite": "^5.0.8",
     "vitest": "^4.0.18"
     "vitest": "^4.0.18"
   }
   }
-}
+}

+ 16 - 0
frontend/react-template/src/api/traceApi.ts

@@ -70,4 +70,20 @@ export const traceApi = {
       temperature?: number;
       temperature?: number;
     }>(`/api/examples/${projectName}/prompt`);
     }>(`/api/examples/${projectName}/prompt`);
   },
   },
+  uploadTraces(file: File) {
+    const formData = new FormData();
+    formData.append("file", file);
+    return request<{
+      success: boolean;
+      message: string;
+      imported_traces: string[];
+      failed_traces: Array<{ trace_id: string; reason: string }>;
+    }>("/api/traces/upload", {
+      method: "POST",
+      data: formData,
+      headers: {
+        "Content-Type": "multipart/form-data",
+      },
+    });
+  },
 };
 };

+ 24 - 26
frontend/react-template/src/components/FlowChart/FlowChart.tsx

@@ -1050,38 +1050,36 @@ const FlowChartComponent: ForwardRefRenderFunction<FlowChartRef, FlowChartProps>
                             : "drop-shadow(0 1px 2px rgb(0 0 0 / 0.05))",
                             : "drop-shadow(0 1px 2px rgb(0 0 0 / 0.05))",
                       }}
                       }}
                     />
                     />
-                    {/* 节点文本(带 Tooltip) */}
+                    {/* 节点文本 */}
                     <foreignObject
                     <foreignObject
                       x={-70}
                       x={-70}
                       y={-25}
                       y={-25}
                       width={150}
                       width={150}
                       height={50}
                       height={50}
                     >
                     >
-                      <Tooltip content={text}>
-                        <div
-                          className="w-full h-full overflow-hidden flex items-center px-2 gap-2"
-                          style={{
-                            color: textColor,
-                            justifyContent: thumbnail ? "space-between" : "center",
-                          }}
-                        >
-                          <span className={`text-xs line-clamp-3 ${thumbnail ? "flex-1 text-left" : "text-center"}`}>
-                            {text}
-                          </span>
-                          {thumbnail && (
-                            <img
-                              src={thumbnail}
-                              alt="thumb"
-                              className="w-8 h-8 object-cover rounded border border-gray-200 bg-white flex-shrink-0 hover:scale-110 transition-transform"
-                              loading="lazy"
-                              onClick={(e) => {
-                                e.stopPropagation();
-                                setPreviewImage(thumbnail);
-                              }}
-                            />
-                          )}
-                        </div>
-                      </Tooltip>
+                      <div
+                        className="w-full h-full overflow-hidden flex items-center px-2 gap-2"
+                        style={{
+                          color: textColor,
+                          justifyContent: thumbnail ? "space-between" : "center",
+                        }}
+                      >
+                        <span className={`text-xs line-clamp-3 ${thumbnail ? "flex-1 text-left" : "text-center"}`}>
+                          {text}
+                        </span>
+                        {thumbnail && (
+                          <img
+                            src={thumbnail}
+                            alt="thumb"
+                            className="w-8 h-8 object-cover rounded border border-gray-200 bg-white flex-shrink-0 hover:scale-110 transition-transform"
+                            loading="lazy"
+                            onClick={(e) => {
+                              e.stopPropagation();
+                              setPreviewImage(thumbnail);
+                            }}
+                          />
+                        )}
+                      </div>
                     </foreignObject>
                     </foreignObject>
                   </g>
                   </g>
                 );
                 );

+ 0 - 1
frontend/react-template/src/components/FlowChart/components/Edge.tsx

@@ -94,7 +94,6 @@ export const Edge: FC<EdgeProps> = ({ link, label, highlighted, dimmed, onClick,
           onClick();
           onClick();
         }}
         }}
       >
       >
-        <title>{label}</title>
         <rect
         <rect
           x={-30}
           x={-30}
           y={-10}
           y={-10}

+ 1 - 2
frontend/react-template/src/components/FlowChart/components/Node.tsx

@@ -30,7 +30,6 @@ export const Node: FC<NodeProps> = ({ node, selected, highlighted, dimmed, onCli
       onClick={onClick}
       onClick={onClick}
       style={{ cursor: "pointer" }}
       style={{ cursor: "pointer" }}
     >
     >
-      <title>{data.description}</title>
       <rect
       <rect
         x={-70}
         x={-70}
         y={-26}
         y={-26}
@@ -49,7 +48,7 @@ export const Node: FC<NodeProps> = ({ node, selected, highlighted, dimmed, onCli
         fontWeight={selected || highlighted ? 600 : 400}
         fontWeight={selected || highlighted ? 600 : 400}
         style={{ opacity: dimmed ? 0.35 : 1, pointerEvents: "none" }}
         style={{ opacity: dimmed ? 0.35 : 1, pointerEvents: "none" }}
       >
       >
-        {truncateMiddle(data.description || data.id, 10)}
+        {truncateMiddle(data.description || data.id, 20)}
       </text>
       </text>
       {data.status === "running" && (
       {data.status === "running" && (
         <circle
         <circle

+ 8 - 5
frontend/react-template/src/components/MainContent/MainContent.tsx

@@ -130,12 +130,15 @@ export const MainContent: FC<MainContentProps> = ({
               const trace = traceList.find((t) => t.trace_id === value);
               const trace = traceList.find((t) => t.trace_id === value);
               onTraceChange?.(value as string, trace?.task || trace?.trace_id);
               onTraceChange?.(value as string, trace?.task || trace?.trace_id);
             }}
             }}
-            style={{ width: 200 }}
+            style={{ width: 400 }}
             placeholder="选择 Trace"
             placeholder="选择 Trace"
-            optionList={traceList.map((t) => ({
-              label: t.task?.length > 15 ? `${t.task.slice(0, 15)}...` : t.task || t.trace_id,
-              value: t.trace_id,
-            }))}
+            optionList={traceList.map((t) => {
+              const taskDesc = t.task && t.task.length > 20 ? `${t.task.slice(0, 20)}...` : t.task;
+              return {
+                label: taskDesc ? `${t.trace_id} - ${taskDesc}` : t.trace_id,
+                value: t.trace_id,
+              };
+            })}
           />
           />
           {/* <div className={styles.status}>{connected ? "WebSocket 已连接" : "WebSocket 未连接"}</div> */}
           {/* <div className={styles.status}>{connected ? "WebSocket 已连接" : "WebSocket 未连接"}</div> */}
           {/* <div className={styles.legend}>
           {/* <div className={styles.legend}>

+ 15 - 0
frontend/react-template/src/components/TopBar/TopBar.module.css

@@ -127,6 +127,21 @@
   background: #fef3c7; /* Amber 100 */
   background: #fef3c7; /* Amber 100 */
 }
 }
 
 
+/* Info Button */
+.button.info {
+  background: var(--bg-surface);
+  color: #3b82f6; /* Blue 500 */
+  border-color: #3b82f6;
+}
+
+.button.info:hover:not(:disabled) {
+  background: #eff6ff; /* Blue 50 */
+}
+
+.button.info:active:not(:disabled) {
+  background: #dbeafe; /* Blue 100 */
+}
+
 .button:disabled {
 .button:disabled {
   opacity: 0.5;
   opacity: 0.5;
   cursor: not-allowed;
   cursor: not-allowed;

+ 67 - 0
frontend/react-template/src/components/TopBar/TopBar.tsx

@@ -2,6 +2,7 @@ import { useCallback, useEffect, useState, useRef } from "react";
 import type { FC } from "react";
 import type { FC } from "react";
 import ReactMarkdown from "react-markdown";
 import ReactMarkdown from "react-markdown";
 import { Modal, Form, Toast } from "@douyinfe/semi-ui";
 import { Modal, Form, Toast } from "@douyinfe/semi-ui";
+import JSZip from "jszip";
 import { traceApi } from "../../api/traceApi";
 import { traceApi } from "../../api/traceApi";
 import type { Goal } from "../../types/goal";
 import type { Goal } from "../../types/goal";
 import type { Message } from "../../types/message";
 import type { Message } from "../../types/message";
@@ -33,6 +34,7 @@ export const TopBar: FC<TopBarProps> = ({
   const [exampleProjects, setExampleProjects] = useState<Array<{ name: string; path: string; has_prompt: boolean }>>(
   const [exampleProjects, setExampleProjects] = useState<Array<{ name: string; path: string; has_prompt: boolean }>>(
     [],
     [],
   );
   );
+  const [isUploading, setIsUploading] = useState(false);
   // 控制中心面板
   // 控制中心面板
   const [isControlPanelVisible, setIsControlPanelVisible] = useState(false);
   const [isControlPanelVisible, setIsControlPanelVisible] = useState(false);
 
 
@@ -248,6 +250,59 @@ export const TopBar: FC<TopBarProps> = ({
     }
     }
   };
   };
 
 
+  const handleUpload = async (event: React.ChangeEvent<HTMLInputElement>) => {
+    const files = event.target.files;
+    if (!files || files.length === 0) return;
+
+    setIsUploading(true);
+    try {
+      // 创建 ZIP 文件
+      const zip = new JSZip();
+
+      // 将所有文件添加到 ZIP
+      for (let i = 0; i < files.length; i++) {
+        const file = files[i];
+        // 使用 webkitRelativePath 保持文件夹结构
+        const path = file.webkitRelativePath || file.name;
+        zip.file(path, file);
+      }
+
+      // 生成 ZIP blob
+      Toast.info("正在压缩文件...");
+      const zipBlob = await zip.generateAsync({ type: "blob" });
+
+      // 创建 File 对象
+      const zipFile = new File([zipBlob], "traces.zip", { type: "application/zip" });
+
+      // 上传
+      Toast.info("正在上传...");
+      const result = await traceApi.uploadTraces(zipFile);
+
+      if (result.success) {
+        Toast.success(result.message);
+        // 刷新 trace 列表
+        loadTraces();
+        if (onTraceCreated) {
+          onTraceCreated();
+        }
+      } else {
+        Toast.warning(result.message);
+      }
+
+      // 显示详细信息
+      if (result.failed_traces.length > 0) {
+        console.warn("Failed traces:", result.failed_traces);
+      }
+    } catch (error) {
+      console.error("Failed to upload traces:", error);
+      Toast.error("上传失败");
+    } finally {
+      setIsUploading(false);
+      // 清空 input,允许重复上传同一文件
+      event.target.value = "";
+    }
+  };
+
   return (
   return (
     <>
     <>
       <header className={styles.topbar}>
       <header className={styles.topbar}>
@@ -291,6 +346,18 @@ export const TopBar: FC<TopBarProps> = ({
           >
           >
             经验
             经验
           </button>
           </button>
+          <label className={`${styles.button} ${styles.info}`} style={{ cursor: 'pointer' }}>
+            {isUploading ? "上传中..." : "📤 导入"}
+            <input
+              type="file"
+              webkitdirectory=""
+              directory=""
+              multiple
+              onChange={handleUpload}
+              disabled={isUploading}
+              style={{ display: 'none' }}
+            />
+          </label>
         </div>
         </div>
         <Modal
         <Modal
           title={<div className="w-full text-center">新建任务</div>}
           title={<div className="w-full text-center">新建任务</div>}

+ 8 - 0
frontend/react-template/src/global.d.ts

@@ -2,3 +2,11 @@ declare module "*.module.css" {
   const classes: { [key: string]: string };
   const classes: { [key: string]: string };
   export default classes;
   export default classes;
 }
 }
+
+// 扩展 HTMLInputElement 以支持 webkitdirectory 属性
+declare module "react" {
+  interface InputHTMLAttributes<T> extends HTMLAttributes<T> {
+    webkitdirectory?: string;
+    directory?: string;
+  }
+}

+ 2 - 2
knowhub/docs/knowledge-management.md

@@ -108,7 +108,7 @@ KnowHub 采用 Milvus Lite 单一存储架构(详见 `knowhub/docs/decisions.m
 - **id**: 唯一标识,格式 `knowledge-{timestamp}-{random}`
 - **id**: 唯一标识,格式 `knowledge-{timestamp}-{random}`
 - **message_id**: 来源 Message ID(用于精确溯源到具体消息)
 - **message_id**: 来源 Message ID(用于精确溯源到具体消息)
 - **types**: 知识类型数组(可多选)
 - **types**: 知识类型数组(可多选)
-  - `user_profile`: 用户偏好、习惯、背景
+  - `user_profile`: 特定用户偏好、习惯、背景
   - `strategy`: 执行经验(从反思中获得)
   - `strategy`: 执行经验(从反思中获得)
   - `tool`: 工具使用方法、优缺点、代码示例
   - `tool`: 工具使用方法、优缺点、代码示例
   - `usecase`: 用户背景、方案、步骤、效果
   - `usecase`: 用户背景、方案、步骤、效果
@@ -459,7 +459,7 @@ return ToolResult(
 - `top_k`: 返回数量(默认 5)
 - `top_k`: 返回数量(默认 5)
 - `min_score`: 最低评分过滤(默认 3)
 - `min_score`: 最低评分过滤(默认 3)
 - `types`: 按类型过滤(可选,逗号分隔)
 - `types`: 按类型过滤(可选,逗号分隔)
-- `owner`: 按所有者过滤(可选)
+- `owner`: 按所有者过滤(可选,支持多个owner用逗号分隔,如 "user1@example.com,user2@example.com"
 
 
 **检索流程**:
 **检索流程**:
 
 

+ 14 - 2
knowhub/server.py

@@ -612,7 +612,13 @@ async def search_knowledge_api(
             for t in type_list:
             for t in type_list:
                 filters.append(f'array_contains(types, "{t}")')
                 filters.append(f'array_contains(types, "{t}")')
         if owner:
         if owner:
-            filters.append(f'owner == "{owner}"')
+            owner_list = [o.strip() for o in owner.split(',') if o.strip()]
+            if len(owner_list) == 1:
+                filters.append(f'owner == "{owner_list[0]}"')
+            else:
+                # 多个owner用OR连接
+                owner_filters = [f'owner == "{o}"' for o in owner_list]
+                filters.append(f'({" or ".join(owner_filters)})')
 
 
         # 添加 min_score 过滤
         # 添加 min_score 过滤
         filters.append(f'eval["score"] >= {min_score}')
         filters.append(f'eval["score"] >= {min_score}')
@@ -747,7 +753,13 @@ def list_knowledge(
             filters.append(f'array_contains(scopes, "{scopes}")')
             filters.append(f'array_contains(scopes, "{scopes}")')
 
 
         if owner:
         if owner:
-            filters.append(f'owner like "%{owner}%"')
+            owner_list = [o.strip() for o in owner.split(',') if o.strip()]
+            if len(owner_list) == 1:
+                filters.append(f'owner like "%{owner_list[0]}%"')
+            else:
+                # 多个owner用OR连接
+                owner_filters = [f'owner like "%{o}%"' for o in owner_list]
+                filters.append(f'({" or ".join(owner_filters)})')
 
 
         # tags 支持多个,用 AND 连接(使用 tag_keys 数组进行高效筛选)
         # tags 支持多个,用 AND 连接(使用 tag_keys 数组进行高效筛选)
         if tags:
         if tags: