server.py

import asyncio
import json
from typing import Any, Dict, List

import mcp.types as types
from mcp.server.lowlevel import Server

from applications.resource import get_resource_manager
from applications.utils.chat import RAGChatAgent
from applications.utils.mysql import ChatResult
from applications.utils.spider.study import study
from routes.buleprint import query_search
def create_mcp_server() -> Server:
    """Create and configure the MCP server."""
    app = Server("mcp-rag-server")

    @app.call_tool()
    async def call_tool(
        name: str, arguments: Dict[str, Any]
    ) -> List[types.TextContent]:
        """Handle tool calls."""
        # ctx = app.request_context
        if name == "rag-search":
            data = await rag_search(arguments["query_text"])
            result = json.dumps(data, ensure_ascii=False, indent=2)
        else:
            raise ValueError(f"Unknown tool: {name}")
        return [types.TextContent(type="text", text=result)]

    @app.list_tools()
    async def list_tools() -> List[types.Tool]:
        return [
            types.Tool(
                name="rag-search",
                title="RAG search",
                description="Search content and generate a summary",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "query_text": {
                            "type": "string",
                            "description": "The query text entered by the user",
                        }
                    },
                    "required": ["query_text"],  # only query_text is required
                    "additionalProperties": False,
                },
            ),
        ]

    return app
async def process_question(question, query_text, rag_chat_agent):
    try:
        dataset_id_strs = "11,12"
        dataset_ids = dataset_id_strs.split(",")
        search_type = "hybrid"

        # Run the retrieval query
        query_results = await query_search(
            query_text=question,
            filters={"dataset_id": dataset_ids},
            search_type=search_type,
        )

        resource = get_resource_manager()
        chat_result_mapper = ChatResult(resource.mysql_client)

        # Chat with DeepSeek asynchronously
        chat_result = await rag_chat_agent.chat_with_deepseek(question, query_results)

        # Decide whether a study task needs to be triggered
        study_task_id = None
        if chat_result["status"] == 0:
            study_task_id = study(question)["task_id"]

        # Fetch the LLM search result asynchronously
        llm_search_result = await rag_chat_agent.llm_search(question)

        # Run the decision logic
        decision = await rag_chat_agent.make_decision(chat_result, llm_search_result)

        # Build the response payload
        data = {
            "query": question,
            "result": decision["result"],
            "status": decision["status"],
            "relevance_score": decision["relevance_score"],
        }

        # Persist the result to the database
        await chat_result_mapper.insert_chat_result(
            question,
            dataset_id_strs,
            json.dumps(query_results, ensure_ascii=False),
            chat_result["summary"],
            chat_result["relevance_score"],
            chat_result["status"],
            llm_search_result["answer"],
            llm_search_result["source"],
            llm_search_result["status"],
            decision["result"],
            study_task_id,
        )
        return data
    except Exception as e:
        print(f"Error processing question: {question}. Error: {str(e)}")
        return {"query": question, "error": str(e)}
async def rag_search(query_text: str):
    rag_chat_agent = RAGChatAgent()
    split_query = await rag_chat_agent.split_query(query_text)
    split_questions = split_query["split_questions"]
    split_questions.append(query_text)

    # Process every sub-question concurrently with asyncio.gather
    tasks = [
        process_question(question, query_text, rag_chat_agent)
        for question in split_questions
    ]

    # Wait for all tasks to finish and collect the results
    data_list = await asyncio.gather(*tasks)
    return data_list
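

# Note: this module only builds the Server object; the caller still has to wire
# it to a transport. The entry point below is a minimal sketch, assuming the
# stdio transport from the MCP Python SDK (mcp.server.stdio.stdio_server) and
# the default initialization options; adapt it to whatever transport the
# deployment actually uses.
import mcp.server.stdio


async def main() -> None:
    app = create_mcp_server()
    # stdio_server() yields the read/write streams the low-level Server runs on
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await app.run(
            read_stream, write_stream, app.create_initialization_options()
        )


if __name__ == "__main__":
    asyncio.run(main())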