# prompt_util.py

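"""Helpers that assemble prompts and dispatch them to OpenAI-compatible chat models."""
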
import json
from datetime import datetime

from openai import OpenAI

from pqai_agent import logging_service, chat_service
from pqai_agent.response_type_detector import ResponseTypeDetector
from pqai_agent.user_profile_extractor import UserProfileExtractor
from pqai_agent.dialogue_manager import DialogueManager
from pqai_agent.message import MessageType
from pqai_agent.utils.prompt_utils import format_agent_profile

logger = logging_service.logger


def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
    """Convert dialogue-history entries into OpenAI-style messages, without timestamps."""
    messages = []
    for entry in dialogue_history:
        role = entry["role"]
        msg_type = entry.get("type", MessageType.TEXT)
        if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
            if multimodal:
                messages.append(
                    {
                        "role": role,
                        "content": [
                            {
                                "type": "image_url",
                                "image_url": {"url": entry["content"]},
                            }
                        ],
                    }
                )
            else:
                logger.warning("Image in non-multimodal mode")
                # "[图片]" is an "[image]" placeholder for text-only models.
                messages.append({"role": role, "content": "[图片]"})
        else:
            messages.append({"role": role, "content": str(entry["content"])})
    return messages
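
# A minimal usage sketch (hypothetical entries; field names follow the access
# patterns above, MessageType values assumed from pqai_agent.message):
#
#   history = [
#       {"role": "user", "content": "hello", "type": MessageType.TEXT},
#       {"role": "user", "content": "https://example.com/cat.gif",
#        "type": MessageType.GIF},
#   ]
#   compose_openai_chat_messages_no_time(history, multimodal=True)
#   # -> [{"role": "user", "content": "hello"},
#   #     {"role": "user", "content": [{"type": "image_url",
#   #       "image_url": {"url": "https://example.com/cat.gif"}}]}]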


def run_openai_chat(messages, model_name, **kwargs):
    """Route the request to the OpenAI-compatible endpoint that serves the given model."""
    volcengine_models = [
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
        chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
    ]
    deepseek_models = [
        chat_service.DEEPSEEK_CHAT_MODEL,
    ]
    volcengine_bots = [
        chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
    ]
    if model_name in volcengine_models:
        llm_client = OpenAI(
            api_key=chat_service.VOLCENGINE_API_TOKEN,
            base_url=chat_service.VOLCENGINE_BASE_URL,
        )
    elif model_name in volcengine_bots:
        llm_client = OpenAI(
            api_key=chat_service.VOLCENGINE_API_TOKEN,
            base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
        )
    elif model_name in deepseek_models:
        llm_client = OpenAI(
            api_key=chat_service.DEEPSEEK_API_TOKEN,
            base_url=chat_service.DEEPSEEK_BASE_URL,
        )
    else:
        raise ValueError(f"model not supported: {model_name}")
    response = llm_client.chat.completions.create(
        messages=messages, model=model_name, **kwargs
    )
    logger.debug(response)
    return response
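
# Usage sketch (the model constant must be one of the lists above; extra kwargs
# are passed straight to chat.completions.create):
#
#   response = run_openai_chat(
#       [{"role": "user", "content": "hello"}],
#       chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
#       temperature=0.7,
#   )
#   reply = response.choices[0].message.content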


def run_extractor_prompt(req_data):
    """Run the user-profile extraction prompt and return the extracted fields."""
    prompt = req_data["prompt"]
    user_profile = req_data["user_profile"]
    staff_profile = req_data["staff_profile"]
    dialogue_history = req_data["dialogue_history"]
    model_name = req_data["model_name"]
    prompt_context = {
        "formatted_staff_profile": format_agent_profile(staff_profile),
        **user_profile,
        "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
    }
    prompt = prompt.format(**prompt_context)
    messages = [
        # System prompt: "You are a professional user-profile analysis assistant."
        {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
        {"role": "user", "content": prompt},
    ]
    tools = [UserProfileExtractor.get_extraction_function()]
    response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
    tool_calls = response.choices[0].message.tool_calls
    if tool_calls:
        function_call = tool_calls[0]
        if function_call.function.name == "update_user_profile":
            profile_info = json.loads(function_call.function.arguments)
            # Keep only non-empty extracted values.
            return {k: v for k, v in profile_info.items() if v}
        logger.error("LLM did not call update_user_profile")
    return {}
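
# Expected request shape, inferred from the key accesses above (values hypothetical):
#
#   req_data = {
#       "prompt": "...{formatted_staff_profile}...{dialogue_history}...",
#       "user_profile": {...},
#       "staff_profile": {...},
#       "dialogue_history": [...],
#       "model_name": chat_service.DEEPSEEK_CHAT_MODEL,
#   }
#   updated_fields = run_extractor_prompt(req_data)  # {} when nothing was extracted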


def run_chat_prompt(req_data):
    """Build the chat prompt for the requested scene and run it against the model."""
    prompt = req_data["prompt"]
    staff_profile = req_data.get("staff_profile", {})
    user_profile = req_data.get("user_profile", {})
    dialogue_history = req_data.get("dialogue_history", [])
    model_name = req_data["model_name"]
    # Incoming timestamp is in milliseconds; convert to seconds.
    current_timestamp = req_data["current_timestamp"] / 1000
    prompt_context = {
        "formatted_staff_profile": format_agent_profile(staff_profile),
        **user_profile,
    }
    current_hour = datetime.fromtimestamp(current_timestamp).hour
    prompt_context["last_interaction_interval"] = 0
    prompt_context["current_time_period"] = DialogueManager.get_time_context(
        current_hour
    )
    prompt_context["current_hour"] = current_hour
    prompt_context["if_first_interaction"] = not dialogue_history
    last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
    prompt_context["if_active_greeting"] = last_message["role"] != "user"
    current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
    messages = [system_prompt]
    if req_data["scene"] == "custom_debugging":
        messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
        # "头像" ("avatar"): if the rendered prompt mentions the user's avatar,
        # attach the avatar image so a multimodal model can see it.
        if "头像" in system_prompt["content"]:
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": user_profile["avatar"]},
                        }
                    ],
                }
            )
    else:
        messages.extend(
            DialogueManager.compose_chat_messages_openai_compatible(
                dialogue_history, current_time_str
            )
        )
    return run_openai_chat(
        messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
    )
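
# Usage sketch (keys inferred from the accesses above; values hypothetical):
#
#   response = run_chat_prompt({
#       "prompt": "...{formatted_staff_profile}...{current_time_period}...",
#       "staff_profile": {...},
#       "user_profile": {"avatar": "https://example.com/a.png"},
#       "dialogue_history": [...],
#       "model_name": chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
#       "current_timestamp": 1700000000000,  # milliseconds
#       "scene": "custom_debugging",
#   })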


def run_response_type_prompt(req_data):
    """Classify the response type of the latest message, given the prior dialogue."""
    prompt = req_data["prompt"]
    dialogue_history = req_data["dialogue_history"]
    model_name = req_data["model_name"]
    # Everything except the last message is context; the last message is the
    # one whose response type is being classified.
    composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
    next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
    prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
    messages = [
        # System prompt: "You are a professional intelligent assistant."
        {"role": "system", "content": "你是一个专业的智能助手"},
        {"role": "user", "content": prompt},
    ]
    return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
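
# Usage sketch: the prompt template must expose {dialogue_history} and {message}
# placeholders (values hypothetical):
#
#   response = run_response_type_prompt({
#       "prompt": "History: {dialogue_history}\nNext message: {message}",
#       "dialogue_history": [...],  # at least one entry; the last is classified
#       "model_name": chat_service.DEEPSEEK_CHAT_MODEL,
#   })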