prompt_util.py 7.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192
  1. import json
  2. from datetime import datetime
  3. from typing import List, Dict
  4. from openai import OpenAI
  5. from pqai_agent import logging_service, chat_service
  6. from pqai_agent.response_type_detector import ResponseTypeDetector
  7. from pqai_agent.user_profile_extractor import UserProfileExtractor
  8. from pqai_agent.dialogue_manager import DialogueManager
  9. from pqai_agent.mq_message import MessageType
  10. from pqai_agent.utils.prompt_utils import format_agent_profile
# Module-level logger shared by all prompt helpers in this file.
logger = logging_service.logger
  12. def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
  13. messages = []
  14. for entry in dialogue_history:
  15. role = entry["role"]
  16. msg_type = entry.get("type", MessageType.TEXT)
  17. fmt_time = DialogueManager.format_timestamp(entry["timestamp"])
  18. if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
  19. if multimodal:
  20. messages.append(
  21. {
  22. "role": role,
  23. "content": [
  24. {
  25. "type": "image_url",
  26. "image_url": {"url": entry["content"]},
  27. }
  28. ],
  29. }
  30. )
  31. else:
  32. logger.warning("Image in non-multimodal mode")
  33. messages.append({"role": role, "content": "[图片]"})
  34. else:
  35. messages.append({"role": role, "content": f'{entry["content"]}'})
  36. return messages
  37. def run_openai_chat(messages, model_name, **kwargs):
  38. volcengine_models = [
  39. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
  40. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
  41. chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
  42. chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
  43. ]
  44. deepseek_models = [
  45. chat_service.DEEPSEEK_CHAT_MODEL,
  46. ]
  47. volcengine_bots = [
  48. chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
  49. ]
  50. if model_name in volcengine_models:
  51. llm_client = OpenAI(
  52. api_key=chat_service.VOLCENGINE_API_TOKEN,
  53. base_url=chat_service.VOLCENGINE_BASE_URL,
  54. )
  55. elif model_name in volcengine_bots:
  56. llm_client = OpenAI(
  57. api_key=chat_service.VOLCENGINE_API_TOKEN,
  58. base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
  59. )
  60. elif model_name in deepseek_models:
  61. llm_client = OpenAI(
  62. api_key=chat_service.DEEPSEEK_API_TOKEN,
  63. base_url=chat_service.DEEPSEEK_BASE_URL,
  64. )
  65. else:
  66. raise Exception("model not supported")
  67. response = llm_client.chat.completions.create(
  68. messages=messages, model=model_name, **kwargs
  69. )
  70. logger.debug(response)
  71. return response
  72. def run_extractor_prompt(req_data):
  73. prompt = req_data["prompt"]
  74. user_profile = req_data["user_profile"]
  75. staff_profile = req_data["staff_profile"]
  76. dialogue_history = req_data["dialogue_history"]
  77. model_name = req_data["model_name"]
  78. prompt_context = {
  79. "formatted_staff_profile": format_agent_profile(staff_profile),
  80. **user_profile,
  81. "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
  82. }
  83. prompt = prompt.format(**prompt_context)
  84. messages = [
  85. {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
  86. {"role": "user", "content": prompt},
  87. ]
  88. tools = [UserProfileExtractor.get_extraction_function()]
  89. response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
  90. tool_calls = response.choices[0].message.tool_calls
  91. if tool_calls:
  92. function_call = tool_calls[0]
  93. if function_call.function.name == "update_user_profile":
  94. profile_info = json.loads(function_call.function.arguments)
  95. return {k: v for k, v in profile_info.items() if v}
  96. else:
  97. logger.error("llm does not return update_user_profile")
  98. return {}
  99. else:
  100. return {}
  101. def run_chat_prompt(req_data):
  102. prompt = req_data["prompt"]
  103. staff_profile = req_data.get("staff_profile", {})
  104. user_profile = req_data.get("user_profile", {})
  105. dialogue_history = req_data.get("dialogue_history", [])
  106. model_name = req_data["model_name"]
  107. current_timestamp = req_data["current_timestamp"] / 1000
  108. prompt_context = {
  109. 'formatted_staff_profile': format_agent_profile(staff_profile),
  110. **user_profile
  111. }
  112. current_hour = datetime.fromtimestamp(current_timestamp).hour
  113. prompt_context["last_interaction_interval"] = 0
  114. prompt_context["current_time_period"] = DialogueManager.get_time_context(
  115. current_hour
  116. )
  117. prompt_context["current_hour"] = current_hour
  118. prompt_context["if_first_interaction"] = False if dialogue_history else True
  119. last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
  120. prompt_context["if_active_greeting"] = (
  121. False if last_message["role"] == "user" else True
  122. )
  123. current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
  124. "%Y-%m-%d %H:%M:%S"
  125. )
  126. system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
  127. messages = [system_prompt]
  128. if req_data["scene"] == "custom_debugging":
  129. messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
  130. if "头像" in system_prompt["content"]:
  131. messages.append(
  132. {
  133. "role": "user",
  134. "content": [
  135. {
  136. "type": "image_url",
  137. "image_url": {"url": user_profile["avatar"]},
  138. }
  139. ],
  140. }
  141. )
  142. else:
  143. messages.extend(
  144. DialogueManager.compose_chat_messages_openai_compatible(
  145. dialogue_history, current_time_str
  146. )
  147. )
  148. return run_openai_chat(
  149. messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
  150. )
  151. def run_response_type_prompt(req_data):
  152. prompt = req_data["prompt"]
  153. dialogue_history = req_data["dialogue_history"]
  154. model_name = req_data["model_name"]
  155. composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
  156. next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
  157. prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
  158. messages = [
  159. {"role": "system", "content": "你是一个专业的智能助手"},
  160. {"role": "user", "content": prompt},
  161. ]
  162. return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
  163. def format_dialogue_history(dialogue: List[Dict]) -> str:
  164. role_map = {'user': '用户', 'assistant': '客服'}
  165. messages = []
  166. for msg in dialogue:
  167. if not msg['content']:
  168. continue
  169. if msg['role'] not in role_map:
  170. continue
  171. format_dt = datetime.fromtimestamp(msg['timestamp'] / 1000).strftime('%Y-%m-%d %H:%M:%S')
  172. msg_type = MessageType(msg.get('type', MessageType.TEXT.value)).description
  173. messages.append('[{}][{}][{}]{}'.format(role_map[msg['role']], format_dt, msg_type, msg['content']))
  174. return '\n'.join(messages)