@@ -20,6 +20,7 @@ from applications.resource import get_resource_manager
 from applications.search import HybridSearch
 from applications.utils.chat import RAGChatAgent
 from applications.utils.mysql import Dataset, Contents, ContentChunks, ChatResult
+from applications.api.qwen import QwenClient
 from applications.utils.spider.study import study

 server_bp = Blueprint("api", __name__, url_prefix="/api")
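Reviewer note: judging from the call sites below, `applications.api.qwen.QwenClient` is assumed to expose roughly the following interface. This is a sketch inferred from usage, not the module's actual code; the return-key names `content` and `search_results` are taken from how the result is consumed in this diff.

```python
# Sketch only: the interface the call sites in this diff appear to assume.
from typing import Any, Dict


class QwenClient:
    def search_and_chat(
        self, user_prompt: str, search_strategy: str = "agent"
    ) -> Dict[str, Any]:
        """Run one search-augmented chat turn against Qwen.

        Assumed return shape (inferred from the handlers below):
          "content":        str  - final answer text
          "search_results": list - raw web-search hits backing the answer
        """
        raise NotImplementedError
```

Note that `search_and_chat` is called without `await` in both async handlers; if it performs blocking HTTP I/O it will stall the event loop (see the sketch after the `chat()` hunks).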
@@ -413,17 +414,17 @@ async def chat():
     result["datasetName"] = dataset_name

     rag_chat_agent = RAGChatAgent()
+    qwen_client = QwenClient()
     chat_result = await rag_chat_agent.chat_with_deepseek(query_text, query_results)
-    # study_task_id = None
-    # if chat_result["status"] == 0:
-    #     study_task_id = study(query_text)['task_id']
-    llm_search = await rag_chat_agent.llm_search(query_text)
-    decision = await rag_chat_agent.make_decision(chat_result, llm_search)
+    llm_search = qwen_client.search_and_chat(
+        user_prompt=query_text, search_strategy="agent"
+    )
+    decision = await rag_chat_agent.make_decision(query_text, chat_result, llm_search)
     data = {
         "results": query_results,
         "chat_res": decision["result"],
         "rag_summary": chat_result["summary"],
-        "llm_summary": llm_search["answer"],
+        "llm_summary": llm_search["content"],
         # "used_tools": decision["used_tools"],
     }
     await chat_result_mapper.insert_chat_result(
@@ -433,9 +434,9 @@ async def chat():
         chat_result["summary"],
         chat_result["relevance_score"],
         chat_result["status"],
-        llm_search["answer"],
-        llm_search["source"],
-        llm_search["status"],
+        llm_search["content"],
+        json.dumps(llm_search["search_results"], ensure_ascii=False),
+        1,
         decision["result"],
         is_web=1,
     )
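Here `search_and_chat` is a synchronous call inside an async view, and the hardcoded `1` replaces the old `llm_search["status"]` field (QwenClient apparently reports no status of its own, so `1` presumably marks success). If the client does blocking network I/O, a minimal sketch of keeping it off the event loop, assuming no other constraints:

```python
import asyncio

# Hypothetical variant of the call above: run the blocking Qwen request
# in a worker thread so the event loop keeps serving other requests.
llm_search = await asyncio.to_thread(
    qwen_client.search_and_chat,
    user_prompt=query_text,
    search_strategy="agent",
)
```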
@@ -561,11 +562,13 @@ async def process_question(question, query_text, rag_chat_agent):
     if chat_result["status"] == 0:
         study_task_id = study(question)["task_id"]

-    # Asynchronously fetch the LLM search result
-    llm_search_result = await rag_chat_agent.llm_search(question)
-
-    # Run the decision logic
-    decision = await rag_chat_agent.make_decision(chat_result, llm_search_result)
+    qwen_client = QwenClient()
+    llm_search = qwen_client.search_and_chat(
+        user_prompt=question, search_strategy="agent"
+    )
+    decision = await rag_chat_agent.make_decision(
+        query_text, chat_result, llm_search
+    )

     # Build the response data
     data = {
@@ -584,9 +587,9 @@ async def process_question(question, query_text, rag_chat_agent):
         chat_result["summary"],
         chat_result["relevance_score"],
         chat_result["status"],
-        llm_search_result["answer"],
-        llm_search_result["source"],
-        llm_search_result["status"],
+        llm_search["content"],
+        json.dumps(llm_search["search_results"], ensure_ascii=False),
+        1,
         decision["result"],
         study_task_id,
     )
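Both call sites serialize the hits with `json.dumps(llm_search["search_results"], ensure_ascii=False)`; this diff does not add a `json` import, so it must already be present in the module. A hypothetical helper that would deduplicate the two call sites and guard against non-serializable values in the hits (names are illustrative, not part of this PR):

```python
import json
from typing import Any


def dump_search_results(results: Any) -> str:
    """Serialize Qwen search hits for storage in the ChatResult row.

    ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable in
    the DB column; default=str guards against values json can't encode.
    """
    return json.dumps(results, ensure_ascii=False, default=str)
```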