|
@@ -239,20 +239,34 @@ class AgentService:
|
|
|
|
|
|
if should_initiate:
|
|
|
logger.warning("user: {}, initiate conversation".format(user_id))
|
|
|
- resp = self._get_chat_response(user_id, agent, None)
|
|
|
- if resp:
|
|
|
- self._send_response(staff_id, user_id, resp, MessageType.TEXT)
|
|
|
- if self.limit_initiative_conversation_rate:
|
|
|
- time.sleep(random.randint(10,20))
|
|
|
+ try:
|
|
|
+ # resp = self._get_chat_response(user_id, agent, None)
|
|
|
+ resp = self._generate_active_greeting_message(agent)
|
|
|
+ if resp:
|
|
|
+ self._send_response(staff_id, user_id, resp, MessageType.TEXT)
|
|
|
+ if self.limit_initiative_conversation_rate:
|
|
|
+ time.sleep(random.randint(10,20))
|
|
|
+ except Exception as e:
|
|
|
+ logger.error("Error in active greeting: {}".format(e))
|
|
|
else:
|
|
|
logger.debug("user: {}, do not initiate conversation".format(user_id))
|
|
|
|
|
|
def _generate_active_greeting_message(self, agent: DialogueManager):
    """Produce a proactive (agent-initiated) greeting via the chat API.

    Builds the active-greeting chat configuration from *agent*, calls the
    OpenAI-compatible chat endpoint, sanitizes the raw reply, and lets the
    agent turn it into a final response.

    Returns the generated greeting, or None when the agent produced no
    usable response (a warning is logged in that case).
    """
    # NOTE(review): this path is pinned to OPENAI_COMPATIBLE rather than
    # self.chat_service_type — presumably intentional for greetings; confirm.
    greeting_config = agent.build_active_greeting_config()
    raw_reply = self._call_chat_api(greeting_config, ChatServiceType.OPENAI_COMPATIBLE)
    cleaned_reply = self.sanitize_response(raw_reply)

    final_response = agent.generate_response(cleaned_reply)
    if not final_response:
        logger.warning(f"staff[{agent.staff_id}] user[{agent.user_id}]: no response generated")
        return None
    return final_response
|
|
|
+
|
|
|
def _get_chat_response(self, user_id: str, agent: DialogueManager,
|
|
|
user_message: Optional[str]):
|
|
|
"""处理LLM响应"""
|
|
|
chat_config = agent.build_chat_configuration(user_message, self.chat_service_type)
|
|
|
logger.debug(chat_config)
|
|
|
- chat_response = self._call_chat_api(chat_config)
|
|
|
+ chat_response = self._call_chat_api(chat_config, self.chat_service_type)
|
|
|
chat_response = self.sanitize_response(chat_response)
|
|
|
|
|
|
if response := agent.generate_response(chat_response):
|