research.py 4.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126
"""
Research Agent — deep-research agent deployed on the KnowHub server side.

Called over HTTP by the FastAPI server; each request corresponds to one
AgentRunner.run(). Resuming an existing run is done by the caller explicitly
passing ``continue_from``.
"""
import logging
import sys
from pathlib import Path
from typing import Dict, Any, Optional, List

# Make the project root importable (three levels up from this file).
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from agent.core.runner import AgentRunner, RunConfig
from agent.trace import FileSystemTraceStore
from agent.llm import create_qwen_llm_call
from agent.llm.prompts import SimplePrompt
from agent.tools.builtin.knowledge import KnowledgeConfig

logger = logging.getLogger("agents.research")

# Research does not currently accept caller-injected skills
# (skills are fixed server-side via config.skills).
ALLOWED_SKILLS: List[str] = []

# ===== Singleton runner state (lazily initialized on first request) =====
_runner: Optional[AgentRunner] = None  # shared AgentRunner instance
_prompt_messages = None  # prebuilt prompt messages (list) once initialized
_initialized = False  # guards one-time initialization
  24. def _ensure_initialized():
  25. """延迟初始化 Runner 和 Prompt(首次调用时执行)"""
  26. global _runner, _prompt_messages, _initialized
  27. if _initialized:
  28. return
  29. _initialized = True
  30. skills_dir = Path(__file__).parent / "skills"
  31. _runner = AgentRunner(
  32. trace_store=FileSystemTraceStore(base_path=".trace"),
  33. llm_call=create_qwen_llm_call(model="qwen3.5-plus"),
  34. skills_dir=str(skills_dir) if skills_dir.exists() else None,
  35. debug=True,
  36. logger_name="agents.research",
  37. )
  38. prompt_path = Path(__file__).parent / "research_agent.prompt"
  39. if prompt_path.exists():
  40. prompt = SimplePrompt(prompt_path)
  41. _prompt_messages = prompt.build_messages()
  42. if getattr(prompt, "meta", None) and prompt.meta.get("model"):
  43. model_name = prompt.meta["model"]
  44. _runner.llm_call = create_qwen_llm_call(model=model_name)
  45. else:
  46. _prompt_messages = []
  47. logger.warning(f"Research prompt 文件不存在: {prompt_path}")
  48. logger.info("✓ Research Agent 已初始化")
  49. # ===== 核心方法 =====
  50. async def research(
  51. query: str,
  52. continue_from: Optional[str] = None,
  53. skills: Optional[List[str]] = None,
  54. ) -> Dict[str, Any]:
  55. """
  56. 同步执行深度调研。运行 Research Agent,返回标准 agent 结果。
  57. Args:
  58. query: 用户设定的研究主题或查询
  59. continue_from: 已有 sub_trace_id,传入则续跑该 trace
  60. skills: 保留参数(Research 当前无白名单 skill);传入会被忽略
  61. Returns:
  62. {"status", "sub_trace_id", "summary", "stats", "error"?}
  63. """
  64. if skills:
  65. logger.warning(f"[Research] 忽略 skills 参数(Research 不接受动态 skill 注入)")
  66. _ensure_initialized()
  67. # 初始化云端无头浏览器(线上部署必须用云浏览器)
  68. try:
  69. from agent.tools.builtin.browser import init_browser_session
  70. await init_browser_session(browser_type="cloud")
  71. except Exception as e:
  72. logger.warning(f"Failed to init cloud browser: {e}")
  73. config = RunConfig(
  74. model="qwen3.5-plus",
  75. temperature=0.3,
  76. max_iterations=200,
  77. tool_groups=["core", "content", "browser"],
  78. exclude_tools=["agent", "evaluate"], # 远端 Agent 禁止递归派生子 Agent
  79. skills=["planning", "research", "browser"],
  80. # 远端 Agent 关闭自动知识操作(否则 injection 会回调 remote_librarian 形成递归)
  81. knowledge=KnowledgeConfig(
  82. enable_extraction=False,
  83. enable_completion_extraction=False,
  84. enable_injection=False,
  85. ),
  86. )
  87. config.trace_id = continue_from # None = 新建;有值 = 续跑
  88. content = f"[RESEARCH TASK] {query}"
  89. if continue_from is None:
  90. messages = _prompt_messages + [{"role": "user", "content": content}]
  91. else:
  92. messages = [{"role": "user", "content": content}]
  93. result = await _runner.run_result(messages=messages, config=config)
  94. return {
  95. "status": result.get("status", "unknown"),
  96. "sub_trace_id": result.get("trace_id"),
  97. "summary": result.get("summary", ""),
  98. "stats": result.get("stats", {}),
  99. "error": result.get("error"),
  100. }