| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329 |
- """
- Librarian Agent — KnowHub 的知识管理 Agent
- 通过 HTTP API 被 FastAPI server 调用,每次请求是一次 AgentRunner.run()。
- 状态全部持久化在 trace 中,通过 trace_id 续跑实现跨请求上下文积累。
- 两种调用模式:
- - ask: 同步,运行 Agent 处理查询,等待完成后返回结果
- - upload: 异步,存 buffer 后由后台任务运行 Agent 处理
- """
- import json
- import logging
- import sys
- from pathlib import Path
- from typing import Optional, Dict, Any
- # 确保项目路径可用
- sys.path.insert(0, str(Path(__file__).parent.parent.parent))
- from agent.core.runner import AgentRunner, RunConfig
- from agent.trace import FileSystemTraceStore, Trace, Message
- from agent.llm import create_qwen_llm_call
- from agent.llm.prompts import SimplePrompt
- from agent.tools.builtin.knowledge import KnowledgeConfig
- logger = logging.getLogger("agents.librarian")
# ===== Configuration =====
# When False, the commit_to_database tool is NOT exposed to the agent (dry-run mode).
ENABLE_DATABASE_COMMIT = False
# Persistence file for the caller trace_id → librarian trace_id mapping.
TRACE_MAP_FILE = Path(".cache/.knowledge/trace_map.json")
def get_librarian_config(enable_db_commit: bool = ENABLE_DATABASE_COMMIT) -> RunConfig:
    """Build the RunConfig for the Librarian Agent.

    Args:
        enable_db_commit: when True, additionally expose the commit_to_database tool.

    Returns:
        A RunConfig with the librarian tool set and knowledge extraction disabled.
    """
    tools = [
        "knowledge_search",
        "tool_search",
        "capability_search",
        "requirement_search",
        "read_file", "write_file",
        "list_cache_status",
        "match_tree_nodes",
        "skill",
    ]
    # Cache tools are always available; commit_to_database is gated so the agent
    # cannot write to the database unless explicitly enabled.
    if enable_db_commit:
        tools.append("commit_to_database")
    tools.extend(["organize_cached_data", "cache_research_data"])
    return RunConfig(
        model="qwen3.5-plus",
        temperature=0.2,
        max_iterations=30,
        agent_type="default",
        name="Librarian Agent",
        goal_compression="on_complete",
        # Do not inject generic skills (planning/research/browser); skills are
        # injected explicitly per call instead.
        skills=[],
        knowledge=KnowledgeConfig(
            enable_extraction=False,
            enable_completion_extraction=False,
            enable_injection=False,
        ),
        tools=tools,
    )
def _register_internal_tools():
    """Register Librarian-internal tools (cache management + tree matching).

    Needs to run only once per process; failures are logged, not raised.
    """
    try:
        sys.path.insert(0, str(Path(__file__).parent.parent))
        from internal_tools.cache_manager import (
            cache_research_data,
            organize_cached_data,
            commit_to_database,
            list_cache_status,
        )
        from internal_tools.tree_matcher import match_tree_nodes
        from agent.tools import get_tool_registry

        registry = get_tool_registry()
        internal_tools = (
            cache_research_data,
            organize_cached_data,
            commit_to_database,
            list_cache_status,
            match_tree_nodes,
        )
        for tool in internal_tools:
            registry.register(tool)
        logger.info("✓ 已注册 Librarian 内部工具")
    except Exception as e:
        logger.error(f"✗ 注册内部工具失败: {e}")
- # ===== trace_id 映射 =====
def _load_trace_map() -> Dict[str, str]:
    """Return the persisted caller→librarian trace-id mapping (empty if absent)."""
    if not TRACE_MAP_FILE.exists():
        return {}
    return json.loads(TRACE_MAP_FILE.read_text(encoding="utf-8"))
def _save_trace_map(mapping: Dict[str, str]):
    """Persist the trace-id mapping as pretty-printed UTF-8 JSON, creating parent dirs."""
    TRACE_MAP_FILE.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(mapping, indent=2, ensure_ascii=False)
    TRACE_MAP_FILE.write_text(payload, encoding="utf-8")
def get_librarian_trace_id(caller_trace_id: str) -> Optional[str]:
    """Return the Librarian trace_id recorded for *caller_trace_id*, or None."""
    if not caller_trace_id:
        return None
    return _load_trace_map().get(caller_trace_id)
def set_librarian_trace_id(caller_trace_id: str, librarian_trace_id: str):
    """Record the caller→librarian trace-id mapping (no-op for empty caller id)."""
    if not caller_trace_id:
        return
    current = _load_trace_map()
    current[caller_trace_id] = librarian_trace_id
    _save_trace_map(current)
# ===== Singleton runner state =====
# Lazily created by _ensure_initialized(); shared by all requests in this process.
_runner: Optional[AgentRunner] = None
# Prebuilt prompt messages, prepended only when a brand-new trace is started.
_prompt_messages = None
# Guards one-time initialization.
_initialized = False
def _ensure_initialized():
    """Lazily build the singleton AgentRunner and prompt messages (runs once)."""
    global _runner, _prompt_messages, _initialized
    if _initialized:
        return
    _initialized = True

    _register_internal_tools()

    _runner = AgentRunner(
        trace_store=FileSystemTraceStore(base_path=".trace"),
        llm_call=create_qwen_llm_call(model="qwen3.5-plus"),
        skills_dir=str(Path(__file__).parent / "skills"),
        debug=True,
        logger_name="agents.librarian",
    )

    prompt_path = Path(__file__).parent / "librarian_agent.prompt"
    if not prompt_path.exists():
        _prompt_messages = []
        logger.warning(f"Librarian prompt 文件不存在: {prompt_path}")
    else:
        _prompt_messages = SimplePrompt(prompt_path).build_messages()
    logger.info("✓ Librarian Agent 已初始化")
- # ===== 核心方法 =====
async def ask(query: str, caller_trace_id: str = "") -> Dict[str, Any]:
    """Query the knowledge base synchronously.

    Runs the Librarian Agent on the query and waits for the consolidated answer.

    Args:
        query: the question to answer.
        caller_trace_id: caller's trace_id, used to resume the librarian trace.

    Returns:
        {"response": str, "source_ids": [str], "sources": [dict]}
    """
    _ensure_initialized()

    # Resume an existing librarian trace for this caller, or start a new one.
    librarian_trace_id = get_librarian_trace_id(caller_trace_id)
    config = get_librarian_config()
    config.trace_id = librarian_trace_id  # None = new trace, value = resume

    user_msg = {"role": "user", "content": f"[ASK] {query}"}
    if librarian_trace_id is None:
        messages = _prompt_messages + [user_msg]
    else:
        messages = [user_msg]

    # Run the agent with the ask_strategy skill explicitly injected.
    answer = ""
    new_trace_id = None
    async for event in _runner.run(
        messages=messages, config=config,
        inject_skills=["ask_strategy"],
        skill_recency_threshold=20,
    ):
        if isinstance(event, Trace):
            new_trace_id = event.trace_id
        elif isinstance(event, Message) and event.role == "assistant":
            body = event.content
            if isinstance(body, dict):
                answer = body.get("text", "") or answer
            elif isinstance(body, str) and body:
                answer = body

    # Remember the mapping so follow-up calls resume the same trace.
    if new_trace_id and caller_trace_id:
        set_librarian_trace_id(caller_trace_id, new_trace_id)

    # Extract knowledge IDs the agent cited, e.g. [knowledge-xxx], de-duplicated
    # while preserving first-seen order.
    import re
    cited = re.findall(r'\[?(knowledge-[a-zA-Z0-9_-]+)\]?', answer)
    unique_ids = list(dict.fromkeys(cited))

    return {
        "response": answer,
        "source_ids": unique_ids,
        "sources": [],  # TODO: extract source details from tool-call results in the trace
    }
async def process_upload(
    data: Dict[str, Any],
    caller_trace_id: str = "",
    buffer_file: Optional[str] = None,
    max_retries: int = 2,
):
    """Process uploaded data by running the Librarian Agent for graph curation.

    Retries with exponential backoff on failure; the final outcome (completed or
    failed) is recorded in the buffer file's status.

    Args:
        data: uploaded payload {knowledge, tools, resources}.
        caller_trace_id: caller's trace_id, used to resume the librarian trace.
        buffer_file: path of the corresponding buffer file (status is updated there).
        max_retries: maximum number of retries after the first attempt.
    """
    # Hoisted out of the retry loop's except clause (idiom: import once).
    import asyncio

    _ensure_initialized()

    librarian_trace_id = get_librarian_trace_id(caller_trace_id)
    config = get_librarian_config()
    config.trace_id = librarian_trace_id  # None = new trace, value = resume

    content = f"[UPLOAD:BATCH] 收到上传请求,请处理:\n{json.dumps(data, ensure_ascii=False)}"
    if librarian_trace_id is None:
        messages = _prompt_messages + [{"role": "user", "content": content}]
    else:
        messages = [{"role": "user", "content": content}]

    last_error = None
    for attempt in range(max_retries + 1):
        try:
            actual_trace_id = None
            async for item in _runner.run(
                messages=messages, config=config,
                inject_skills=["upload_strategy"],
                skill_recency_threshold=10,
            ):
                if isinstance(item, Trace):
                    actual_trace_id = item.trace_id
            if actual_trace_id and caller_trace_id:
                set_librarian_trace_id(caller_trace_id, actual_trace_id)
            # Success: mark the buffer file as completed.
            _update_buffer_status(buffer_file, "completed", trace_id=actual_trace_id)
            logger.info(f"[Librarian] upload 处理完成,trace: {actual_trace_id}")
            return
        except Exception as e:
            last_error = str(e)
            logger.warning(f"[Librarian] upload 处理失败 (attempt {attempt + 1}/{max_retries + 1}): {e}")
            if attempt < max_retries:
                await asyncio.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, ...
    # All retries exhausted: record the failure.
    _update_buffer_status(buffer_file, "failed", error=last_error)
    logger.error(f"[Librarian] upload 处理最终失败: {last_error}")
- def _update_buffer_status(buffer_file: Optional[str], status: str, trace_id: str = None, error: str = None):
- """更新 buffer 文件中的处理状态"""
- if not buffer_file:
- return
- try:
- from datetime import datetime as dt
- path = Path(buffer_file)
- if not path.exists():
- return
- data = json.loads(path.read_text(encoding="utf-8"))
- data["status"] = status
- data["processed_at"] = dt.now().isoformat()
- if trace_id:
- data["librarian_trace_id"] = trace_id
- if error:
- data["error"] = error
- path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
- except Exception as e:
- logger.warning(f"更新 buffer 状态失败: {e}")
def list_pending_uploads(buffer_dir: Optional[Path] = None) -> list:
    """List upload buffer files that are still unprocessed or failed.

    Args:
        buffer_dir: directory holding upload_*.json buffer files; defaults to
            the standard ".cache/.knowledge/buffer" location (backward-compatible).

    Returns:
        A list of dicts {file, status, received_at, error, trace_id}, in sorted
        filename order. Unreadable files are skipped.
    """
    base = Path(buffer_dir) if buffer_dir is not None else Path(".cache/.knowledge/buffer")
    if not base.exists():
        return []
    pending = []
    for f in sorted(base.glob("upload_*.json")):
        try:
            data = json.loads(f.read_text(encoding="utf-8"))
            status = data.get("status", "pending")
            if status in ("pending", "failed"):
                pending.append({
                    "file": str(f),
                    "status": status,
                    "received_at": data.get("received_at", ""),
                    "error": data.get("error", ""),
                    "trace_id": data.get("trace_id", ""),
                })
        except Exception:
            # Best-effort listing: skip corrupt/unreadable buffer files.
            pass
    return pending
|