
clean_agent

丁云鹏 1 week ago
parent commit 648ea0aeea
5 changed files with 25 additions and 56 deletions
  1. agent.py  (+18, -0)
  2. agents/clean_agent/agent.py  (+7, -6)
  3. api.py  (+0, -31)
  4. prompt/clean_agent.md  (+0, -0)
  5. start_api.sh  (+0, -19)

+ 18 - 0
agent.py

@@ -20,6 +20,7 @@ from fastapi import FastAPI, HTTPException, BackgroundTasks
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel, Field
 import uvicorn
+from agents.clean_agent.agent import execute_agent_with_api
 
 # LangGraph-related imports
 try:
@@ -569,6 +570,23 @@ async def process_request_background(request_id: str):
         # Processing failed; update status to 3
         update_request_status(request_id, 3)
 
+@app.post("/extract")
+async def extract(input: str):
+    """
+    Execute the agent to process a user instruction.
+    
+    Args:
+        input: string containing the user's instruction
+        
+    Returns:
+        dict: dictionary containing the execution result
+    """
+    try:
+        result = execute_agent_with_api(input)
+        return {"status": "success", "result": result}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Error while executing the agent: {str(e)}")
+
 if __name__ == "__main__":
     # Start the service
     uvicorn.run(
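
A minimal sketch of how the new /extract endpoint could be called. The host and port are not visible in this diff, so localhost:8000 (the port used by the removed api.py) is an assumption, and the instruction text is illustrative. Because the handler declares input: str without a Pydantic model, FastAPI exposes it as a query parameter rather than a JSON body.

    # Hypothetical client call for the new /extract endpoint.
    # Assumption: the service listens on localhost:8000.
    import requests

    resp = requests.post(
        "http://localhost:8000/extract",
        params={"input": "Keyword: quarterly report\nRequest ID: req-001"},
        timeout=30,
    )
    print(resp.json())  # expected shape: {"status": "success", "result": ...}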

+ 7 - 6
agents/clean_agent/agent.py

@@ -46,8 +46,7 @@ prompt="""
 
 ---
 ### Please provide the information in the following format:
-Keyword: {query_word}
-Request ID: {request_id}
+{input}
 """
 
 class State(TypedDict):
@@ -69,7 +68,10 @@ def main():
 
 
 def execute_agent_with_api(user_input: str):
-    global graph, llm_with_tools
+    global graph, llm_with_tools, prompt
+    
+    # Replace the {input} placeholder in the prompt with the user input
+    formatted_prompt = prompt.replace("{input}", user_input)
     
     # Initialize graph and llm_with_tools if they have not been initialized yet
     if graph is None or llm_with_tools is None:
@@ -102,9 +104,8 @@ def execute_agent_with_api(user_input: str):
     results = []
     config = {"configurable": {"thread_id": thread_id}}
     
-
-
-    for event in graph.stream({"messages": [{"role": "user", "content": user_input}]}, config, stream_mode="values"):
+    # Use the formatted prompt as the user input
+    for event in graph.stream({"messages": [{"role": "user", "content": formatted_prompt}]}, config, stream_mode="values"):
         for value in event.values():
             # Save the message content
             if "messages" in event and len(event["messages"]) > 0:

+ 0 - 31
api.py

@@ -1,31 +0,0 @@
-from fastapi import FastAPI, HTTPException
-from pydantic import BaseModel
-from agents.clean_agent.agent import execute_agent_with_api
-import uvicorn
-
-app = FastAPI(title="Knowledge Agent API", description="API for executing knowledge agent")
-
-
-@app.post("/execute")
-async def execute_agent(input: str):
-    """
-    Execute the agent to process a user instruction.
-    
-    Args:
-        user_input: object containing the user instruction
-        
-    Returns:
-        dict: dictionary containing the execution result
-    """
-    try:
-        result = execute_agent_with_api(input)
-        return {"status": "success", "result": result}
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Error while executing the agent: {str(e)}")
-
-@app.get("/")
-async def root():
-    return {"message": "Knowledge Agent API service is running; use the /execute endpoint to run the agent"}
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)

+ 0 - 0
prompt/clean_agent.md


+ 0 - 19
start_api.sh

@@ -1,19 +0,0 @@
-#!/bin/bash
-
-# Start the Knowledge Agent API service
-
-# Check the Python environment
-if ! command -v python3 &> /dev/null; then
-    echo "Error: python3 command not found"
-    exit 1
-fi
-
-# Check dependencies
-if ! python3 -c "import fastapi" &> /dev/null; then
-    echo "Installing required dependencies..."
-    pip3 install fastapi uvicorn
-fi
-
-# Start the API service
-echo "Starting the Knowledge Agent API service..."
-python3 api.py