|
@@ -414,9 +414,9 @@ async def chat():
|
|
|
|
|
|
rag_chat_agent = RAGChatAgent()
|
|
|
chat_result = await rag_chat_agent.chat_with_deepseek(query_text, query_results)
|
|
|
- study_task_id = None
|
|
|
- if chat_result["status"] == 0:
|
|
|
- study_task_id = study(query_text)['task_id']
|
|
|
+ # study_task_id = None
|
|
|
+ # if chat_result["status"] == 0:
|
|
|
+ # study_task_id = study(query_text)['task_id']
|
|
|
llm_search = await rag_chat_agent.llm_search(query_text)
|
|
|
decision = await rag_chat_agent.make_decision(chat_result, llm_search)
|
|
|
data = {
|
|
@@ -436,7 +436,6 @@ async def chat():
|
|
|
llm_search["source"],
|
|
|
llm_search["status"],
|
|
|
decision["result"],
|
|
|
- study_task_id,
|
|
|
is_web=1,
|
|
|
)
|
|
|
return jsonify({"status_code": 200, "detail": "success", "data": data})
|
|
@@ -518,44 +517,78 @@ async def delete_task():
|
|
|
async def rag_search():
    """Split an incoming query into sub-questions and answer each one.

    Reads ``queryText`` from the JSON request body, asks the RAG agent to
    split it into sub-questions, appends the original query so it is also
    answered as-is, then processes every question concurrently via
    ``process_question``.

    Returns:
        A JSON response whose ``data`` field is the list of per-question
        result dicts produced by ``process_question``.
    """
    body = await request.get_json()
    query_text = body.get("queryText")

    rag_chat_agent = RAGChatAgent()
    # NOTE: fixed misspelled local `spilt_query` -> `split_query`.
    split_query = await rag_chat_agent.split_query(query_text)
    split_questions = split_query["split_questions"]
    # Also answer the original, unsplit query.
    split_questions.append(query_text)

    # Process each question in parallel with asyncio.gather.
    tasks = [
        process_question(question, query_text, rag_chat_agent)
        for question in split_questions
    ]

    # Wait for all tasks to finish and collect their results.
    data_list = await asyncio.gather(*tasks)
    return jsonify({"status_code": 200, "detail": "success", "data": data_list})
|
|
|
+
|
|
|
+
|
|
|
async def process_question(question, query_text, rag_chat_agent):
    """Answer a single (sub-)question end-to-end and persist the outcome.

    Performs a hybrid retrieval for *question*, chats with DeepSeek over the
    hits, schedules a study task when that chat reports status 0, consults
    the LLM web search, and lets the agent decide on a final answer. The
    whole round-trip is stored via ``ChatResult.insert_chat_result``.

    Args:
        question: The (sub-)question to answer.
        query_text: The original user query (kept for interface parity).
        rag_chat_agent: Shared ``RAGChatAgent`` instance.

    Returns:
        dict: Decision summary for *question*, or ``{"query", "error"}`` if
        any step raised.
    """
    # Fixed dataset scope for retrieval — TODO confirm whether this should
    # be configurable rather than hard-coded.
    dataset_csv = "11,12"
    try:
        # Hybrid retrieval over the fixed datasets.
        hits = await query_search(
            query_text=question,
            filters={"dataset_id": dataset_csv.split(",")},
            search_type="hybrid",
        )

        mapper = ChatResult(get_resource_manager().mysql_client)

        # Chat over the retrieved context first ...
        chat_outcome = await rag_chat_agent.chat_with_deepseek(question, hits)

        # ... and kick off a study task only when that chat reported status 0.
        study_task_id = (
            study(question)["task_id"] if chat_outcome["status"] == 0 else None
        )

        # Independent LLM web search, then the final decision step.
        web_answer = await rag_chat_agent.llm_search(question)
        verdict = await rag_chat_agent.make_decision(chat_outcome, web_answer)

        payload = {
            "query": question,
            "result": verdict["result"],
            "status": verdict["status"],
            "relevance_score": verdict["relevance_score"],
        }

        # Persist the complete exchange for later inspection.
        await mapper.insert_chat_result(
            question,
            dataset_csv,
            json.dumps(hits, ensure_ascii=False),
            chat_outcome["summary"],
            chat_outcome["relevance_score"],
            chat_outcome["status"],
            web_answer["answer"],
            web_answer["source"],
            web_answer["status"],
            verdict["result"],
            study_task_id,
        )
        return payload
    except Exception as e:
        # Best-effort per-question handling so one failure does not sink
        # the surrounding asyncio.gather.
        print(f"Error processing question: {question}. Error: {str(e)}")
        return {"query": question, "error": str(e)}
|
|
|
|
|
|
|
|
|
@server_bp.route("/chat/history", methods=["GET"])
|