@@ -115,7 +115,8 @@ def list_scenes():
         {'scene': 'greeting', 'display_name': '问候'},
         {'scene': 'chitchat', 'display_name': '闲聊'},
         {'scene': 'profile_extractor', 'display_name': '画像提取'},
-        {'scene': 'response_type_detector', 'display_name': '回复模态判断'}
+        {'scene': 'response_type_detector', 'display_name': '回复模态判断'},
+        {'scene': 'custom_debugging', 'display_name': '自定义调试场景'}
     ]
     return wrap_response(200, data=scenes)
 
@@ -126,13 +127,15 @@ def get_base_prompt():
         'greeting': prompt_templates.GENERAL_GREETING_PROMPT,
         'chitchat': prompt_templates.CHITCHAT_PROMPT_COZE,
         'profile_extractor': prompt_templates.USER_PROFILE_EXTRACT_PROMPT,
-        'response_type_detector': prompt_templates.RESPONSE_TYPE_DETECT_PROMPT
+        'response_type_detector': prompt_templates.RESPONSE_TYPE_DETECT_PROMPT,
+        'custom_debugging': '',
     }
     model_map = {
         'greeting': chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
         'chitchat': chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
         'profile_extractor': chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
-        'response_type_detector': chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5
+        'response_type_detector': chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
+        'custom_debugging': chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH
     }
     if scene not in prompt_map:
         return wrap_response(404, msg='scene not found')
@@ -151,18 +154,20 @@ def run_openai_chat(messages, model_name, **kwargs):
     deepseek_models = [
         chat_service.DEEPSEEK_CHAT_MODEL,
     ]
+    volcengine_bots = [
+        chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
+    ]
     if model_name in volcengine_models:
         llm_client = OpenAI(api_key=chat_service.VOLCENGINE_API_TOKEN, base_url=chat_service.VOLCENGINE_BASE_URL)
-        response = llm_client.chat.completions.create(
-            messages=messages, model=model_name, **kwargs)
-        return response
+    elif model_name in volcengine_bots:
+        llm_client = OpenAI(api_key=chat_service.VOLCENGINE_API_TOKEN, base_url=chat_service.VOLCENGINE_BOT_BASE_URL)
     elif model_name in deepseek_models:
         llm_client = OpenAI(api_key=chat_service.DEEPSEEK_API_TOKEN, base_url=chat_service.DEEPSEEK_BASE_URL)
-        response = llm_client.chat.completions.create(
-            messages=messages, model=model_name, temperature=1, top_p=0.7, max_tokens=1024)
-        return response
     else:
         raise Exception('model not supported')
+    response = llm_client.chat.completions.create(
+        messages=messages, model=model_name, **kwargs)
+    return response
 
 def run_extractor_prompt(req_data):
     prompt = req_data['prompt']