Browse Source

Update api_server: return error

StrayWarrior 1 week ago
parent
commit
665effdda8
1 changed file with 54 additions and 42 deletions

+ 54 - 42
api_server.py

@@ -134,50 +134,62 @@ def get_base_prompt():
     }
     return wrap_response(200, data=data)
 
+def get_llm_response(model_name, messages):
+    pass  # stub: presumably to absorb the shared LLM-call logic in run_prompt below
+
+def run_chat_prompt():
+    pass  # stub: not yet implemented
+
+def run_extractor_prompt():
+    pass  # stub: not yet implemented
+
 @app.route('/api/runPrompt', methods=['POST'])
 def run_prompt():
-    req_data = request.json
-    scene = req_data['scene']
-    prompt = req_data['prompt']
-    staff_profile = req_data['staff_profile']
-    user_profile = req_data['user_profile']
-    dialogue_history = req_data['dialogue_history']
-    model_name = req_data['model_name']
-    current_timestamp = req_data['current_timestamp']
-    prompt_context = {**staff_profile, **user_profile}
-    current_hour = datetime.fromtimestamp(current_timestamp).hour
-    prompt_context['last_interaction_interval'] = 0
-    prompt_context['current_time_period'] = DialogueManager.get_time_context(current_hour)
-    prompt_context['current_hour'] = current_hour
-    prompt_context['if_first_interaction'] = False if dialogue_history else True
-    volcengine_models = [
-        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
-        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
-        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3
-    ]
-    deepseek_models = [
-        chat_service.DEEPSEEK_CHAT_MODEL,
-    ]
-    current_timestr = datetime.fromtimestamp(current_timestamp).strftime('%Y-%m-%d %H:%M:%S')
-    system_prompt = {
-        'role': 'system',
-        'content': prompt.format(**prompt_context)
-    }
-    messages = []
-    messages.append(system_prompt)
-    messages.extend(DialogueManager.compose_chat_messages_openai_compatible(dialogue_history, current_timestr))
-    if model_name in volcengine_models:
-        llm_client = OpenAI(api_key=chat_service.VOLCENGINE_API_TOKEN, base_url=chat_service.VOLCENGINE_BASE_URL)
-        response = llm_client.chat.completions.create(
-            messages=messages, model=model_name, temperature=1, top_p=0.7, max_tokens=1024)
-        return wrap_response(200, data=response.choices[0].message.content)
-    elif model_name in deepseek_models:
-        llm_client = OpenAI(api_key=chat_service.DEEPSEEK_API_TOKEN, base_url=chat_service.DEEPSEEK_BASE_URL)
-        response = llm_client.chat.completions.create(
-            messages=messages, model=model_name, temperature=1, top_p=0.7, max_tokens=1024)
-        return wrap_response(200, data=response.choices[0].message.content)
-    else:
-        return wrap_response(400, msg='model not supported')
+    try:
+        req_data = request.json
+        scene = req_data['scene']
+        prompt = req_data['prompt']
+        staff_profile = req_data['staff_profile']
+        user_profile = req_data['user_profile']
+        dialogue_history = req_data['dialogue_history']
+        model_name = req_data['model_name']
+        current_timestamp = req_data['current_timestamp'] / 1000  # client sends milliseconds; convert to seconds
+        prompt_context = {**staff_profile, **user_profile}
+        current_hour = datetime.fromtimestamp(current_timestamp).hour
+        prompt_context['last_interaction_interval'] = 0
+        prompt_context['current_time_period'] = DialogueManager.get_time_context(current_hour)
+        prompt_context['current_hour'] = current_hour
+        prompt_context['if_first_interaction'] = not dialogue_history
+        volcengine_models = [
+            chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
+            chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
+            chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3
+        ]
+        deepseek_models = [
+            chat_service.DEEPSEEK_CHAT_MODEL,
+        ]
+        current_time_str = datetime.fromtimestamp(current_timestamp).strftime('%Y-%m-%d %H:%M:%S')
+        system_prompt = {
+            'role': 'system',
+            'content': prompt.format(**prompt_context)
+        }
+        messages = []
+        messages.append(system_prompt)
+        messages.extend(DialogueManager.compose_chat_messages_openai_compatible(dialogue_history, current_time_str))
+        if model_name in volcengine_models:
+            llm_client = OpenAI(api_key=chat_service.VOLCENGINE_API_TOKEN, base_url=chat_service.VOLCENGINE_BASE_URL)
+            response = llm_client.chat.completions.create(
+                messages=messages, model=model_name, temperature=1, top_p=0.7, max_tokens=1024)
+            return wrap_response(200, data=response.choices[0].message.content)
+        elif model_name in deepseek_models:
+            llm_client = OpenAI(api_key=chat_service.DEEPSEEK_API_TOKEN, base_url=chat_service.DEEPSEEK_BASE_URL)
+            response = llm_client.chat.completions.create(
+                messages=messages, model=model_name, temperature=1, top_p=0.7, max_tokens=1024)
+            return wrap_response(200, data=response.choices[0].message.content)
+        else:
+            return wrap_response(400, msg='model not supported')
+    except Exception as e:
+        return wrap_response(500, msg='Error: {}'.format(e))
 
 @app.errorhandler(werkzeug.exceptions.BadRequest)
 def handle_bad_request(e):
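
Note: the new get_llm_response stub suggests the two identical provider branches in run_prompt are meant to be consolidated. A minimal sketch of what that helper could look like, assuming the same chat_service constants and OpenAI-compatible endpoints; the routing table and sampling parameters below simply mirror run_prompt and are illustrative, not part of this commit:

from openai import OpenAI

import chat_service

def get_llm_response(model_name, messages):
    # Hypothetical consolidation of the duplicated branches in run_prompt;
    # the (api_key, base_url) routing mirrors the model lists defined there.
    providers = {
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K:
            (chat_service.VOLCENGINE_API_TOKEN, chat_service.VOLCENGINE_BASE_URL),
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5:
            (chat_service.VOLCENGINE_API_TOKEN, chat_service.VOLCENGINE_BASE_URL),
        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3:
            (chat_service.VOLCENGINE_API_TOKEN, chat_service.VOLCENGINE_BASE_URL),
        chat_service.DEEPSEEK_CHAT_MODEL:
            (chat_service.DEEPSEEK_API_TOKEN, chat_service.DEEPSEEK_BASE_URL),
    }
    if model_name not in providers:
        raise ValueError('model not supported: {}'.format(model_name))
    api_key, base_url = providers[model_name]
    client = OpenAI(api_key=api_key, base_url=base_url)
    # Same sampling parameters as the current run_prompt branches.
    response = client.chat.completions.create(
        messages=messages, model=model_name,
        temperature=1, top_p=0.7, max_tokens=1024)
    return response.choices[0].message.content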
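
For reference, the / 1000 conversion implies clients send current_timestamp in milliseconds. A sketch of a matching request, assuming the server listens locally on port 5000; the host, port, and all field values are hypothetical:

import time

import requests

payload = {
    'scene': 'chat',                                 # illustrative value
    'prompt': 'You are {staff_name}; it is {current_time_period}.',
    'staff_profile': {'staff_name': 'Alice'},        # merged into prompt_context
    'user_profile': {'user_name': 'Bob'},
    'dialogue_history': [],                          # empty -> if_first_interaction
    'model_name': 'deepseek-chat',                   # must match a configured model id
    'current_timestamp': int(time.time() * 1000),    # milliseconds, per the / 1000 above
}
resp = requests.post('http://localhost:5000/api/runPrompt', json=payload)
print(resp.json())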