| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126 |
"""
Research Agent — deep-research agent deployed on the KnowHub server side.
Called over HTTP by the FastAPI server; each request is one AgentRunner.run().
Resuming an existing run happens only when the caller explicitly passes
``continue_from``.
"""
import logging
import sys
from pathlib import Path
from typing import Dict, Any, Optional, List

# Make the project root importable (three levels up from this file).
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from agent.core.runner import AgentRunner, RunConfig
from agent.trace import FileSystemTraceStore
from agent.llm import create_qwen_llm_call
from agent.llm.prompts import SimplePrompt
from agent.tools.builtin.knowledge import KnowledgeConfig

logger = logging.getLogger("agents.research")

# Research does not accept caller-injected skills (skills are fixed
# server-side via config.skills).
ALLOWED_SKILLS: List[str] = []

# ===== Singleton runner state =====
_runner: Optional[AgentRunner] = None  # lazily-created AgentRunner singleton
_prompt_messages = None  # prompt messages built from research_agent.prompt (list once initialized)
_initialized = False  # guard so initialization runs only once
def _ensure_initialized():
    """Lazily initialize the singleton Runner and prompt messages (first call only).

    Subsequent calls are no-ops. The ``_initialized`` flag is set only after
    everything succeeds: the original code flipped it *before* building the
    runner, so a failure during ``AgentRunner(...)`` or prompt loading left
    ``_runner`` permanently ``None`` and every later ``research()`` call
    crashed with ``AttributeError``. Setting the flag last allows a retry
    after a transient initialization failure.
    """
    global _runner, _prompt_messages, _initialized
    if _initialized:
        return

    skills_dir = Path(__file__).parent / "skills"
    _runner = AgentRunner(
        trace_store=FileSystemTraceStore(base_path=".trace"),
        llm_call=create_qwen_llm_call(model="qwen3.5-plus"),
        skills_dir=str(skills_dir) if skills_dir.exists() else None,
        debug=True,
        logger_name="agents.research",
    )

    prompt_path = Path(__file__).parent / "research_agent.prompt"
    if prompt_path.exists():
        prompt = SimplePrompt(prompt_path)
        _prompt_messages = prompt.build_messages()
        # The prompt file may pin a specific model in its metadata; if so,
        # rebind the runner's LLM call to that model.
        if getattr(prompt, "meta", None) and prompt.meta.get("model"):
            model_name = prompt.meta["model"]
            _runner.llm_call = create_qwen_llm_call(model=model_name)
    else:
        _prompt_messages = []
        logger.warning(f"Research prompt 文件不存在: {prompt_path}")

    # Only mark initialized once everything above succeeded.
    _initialized = True
    logger.info("✓ Research Agent 已初始化")
- # ===== 核心方法 =====
async def research(
    query: str,
    continue_from: Optional[str] = None,
    skills: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Run the Research Agent synchronously for one deep-research task.

    Args:
        query: The research topic or query set by the user.
        continue_from: An existing sub_trace_id; when given, that trace is
            resumed instead of starting a new one.
        skills: Reserved parameter (Research currently has no skill
            whitelist); any value passed is ignored with a warning.

    Returns:
        Dict with keys ``"status"``, ``"sub_trace_id"``, ``"summary"``,
        ``"stats"`` and ``"error"`` (``None`` when the run succeeded).
    """
    if skills:
        # Fixed: log string has no placeholders, so the f-prefix was
        # pointless (ruff F541); the emitted message is unchanged.
        logger.warning("[Research] 忽略 skills 参数(Research 不接受动态 skill 注入)")
    _ensure_initialized()

    # Initialize the cloud headless browser (online deployment must use the
    # cloud browser). Best-effort: failure is logged but does not abort.
    try:
        from agent.tools.builtin.browser import init_browser_session
        await init_browser_session(browser_type="cloud")
    except Exception as e:
        logger.warning(f"Failed to init cloud browser: {e}")

    config = RunConfig(
        model="qwen3.5-plus",
        temperature=0.3,
        max_iterations=200,
        tool_groups=["core", "content", "browser"],
        # Remote agents must not recursively spawn sub-agents.
        exclude_tools=["agent", "evaluate"],
        skills=["planning", "research", "browser"],
        # Remote agents disable automatic knowledge operations (otherwise the
        # injection path would call back into remote_librarian recursively).
        knowledge=KnowledgeConfig(
            enable_extraction=False,
            enable_completion_extraction=False,
            enable_injection=False,
        ),
    )
    config.trace_id = continue_from  # None = new trace; a value = resume it

    content = f"[RESEARCH TASK] {query}"
    if continue_from is None:
        # Fresh run: prepend the system/prompt messages.
        messages = _prompt_messages + [{"role": "user", "content": content}]
    else:
        # Resumed run: the trace already carries the prompt context.
        messages = [{"role": "user", "content": content}]

    result = await _runner.run_result(messages=messages, config=config)
    return {
        "status": result.get("status", "unknown"),
        "sub_trace_id": result.get("trace_id"),
        "summary": result.get("summary", ""),
        "stats": result.get("stats", {}),
        "error": result.get("error"),
    }
|