# prompt_util.py
  1. import json
  2. from datetime import datetime
  3. from openai import OpenAI
  4. from pqai_agent import logging_service, chat_service
  5. from pqai_agent.response_type_detector import ResponseTypeDetector
  6. from pqai_agent.user_profile_extractor import UserProfileExtractor
  7. from pqai_agent.dialogue_manager import DialogueManager
  8. from pqai_agent.message import MessageType
# Module-level logger shared by all prompt helpers in this file.
logger = logging_service.logger
  10. def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
  11. messages = []
  12. for entry in dialogue_history:
  13. role = entry["role"]
  14. msg_type = entry.get("type", MessageType.TEXT)
  15. fmt_time = DialogueManager.format_timestamp(entry["timestamp"])
  16. if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
  17. if multimodal:
  18. messages.append(
  19. {
  20. "role": role,
  21. "content": [
  22. {
  23. "type": "image_url",
  24. "image_url": {"url": entry["content"]},
  25. }
  26. ],
  27. }
  28. )
  29. else:
  30. logger.warning("Image in non-multimodal mode")
  31. messages.append({"role": role, "content": "[图片]"})
  32. else:
  33. messages.append({"role": role, "content": f'{entry["content"]}'})
  34. return messages
  35. def run_openai_chat(messages, model_name, **kwargs):
  36. volcengine_models = [
  37. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
  38. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
  39. chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
  40. chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
  41. ]
  42. deepseek_models = [
  43. chat_service.DEEPSEEK_CHAT_MODEL,
  44. ]
  45. volcengine_bots = [
  46. chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
  47. ]
  48. if model_name in volcengine_models:
  49. llm_client = OpenAI(
  50. api_key=chat_service.VOLCENGINE_API_TOKEN,
  51. base_url=chat_service.VOLCENGINE_BASE_URL,
  52. )
  53. elif model_name in volcengine_bots:
  54. llm_client = OpenAI(
  55. api_key=chat_service.VOLCENGINE_API_TOKEN,
  56. base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
  57. )
  58. elif model_name in deepseek_models:
  59. llm_client = OpenAI(
  60. api_key=chat_service.DEEPSEEK_API_TOKEN,
  61. base_url=chat_service.DEEPSEEK_BASE_URL,
  62. )
  63. else:
  64. raise Exception("model not supported")
  65. response = llm_client.chat.completions.create(
  66. messages=messages, model=model_name, **kwargs
  67. )
  68. logger.debug(response)
  69. return response
  70. def run_extractor_prompt(req_data):
  71. prompt = req_data["prompt"]
  72. user_profile = req_data["user_profile"]
  73. staff_profile = req_data["staff_profile"]
  74. dialogue_history = req_data["dialogue_history"]
  75. model_name = req_data["model_name"]
  76. prompt_context = {
  77. **staff_profile,
  78. **user_profile,
  79. "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
  80. }
  81. prompt = prompt.format(**prompt_context)
  82. messages = [
  83. {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
  84. {"role": "user", "content": prompt},
  85. ]
  86. tools = [UserProfileExtractor.get_extraction_function()]
  87. response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
  88. tool_calls = response.choices[0].message.tool_calls
  89. if tool_calls:
  90. function_call = tool_calls[0]
  91. if function_call.function.name == "update_user_profile":
  92. profile_info = json.loads(function_call.function.arguments)
  93. return {k: v for k, v in profile_info.items() if v}
  94. else:
  95. logger.error("llm does not return update_user_profile")
  96. return {}
  97. else:
  98. return {}
  99. def run_chat_prompt(req_data):
  100. prompt = req_data["prompt"]
  101. staff_profile = req_data.get("staff_profile", {})
  102. user_profile = req_data.get("user_profile", {})
  103. dialogue_history = req_data.get("dialogue_history", [])
  104. model_name = req_data["model_name"]
  105. current_timestamp = req_data["current_timestamp"] / 1000
  106. prompt_context = {**staff_profile, **user_profile}
  107. current_hour = datetime.fromtimestamp(current_timestamp).hour
  108. prompt_context["last_interaction_interval"] = 0
  109. prompt_context["current_time_period"] = DialogueManager.get_time_context(
  110. current_hour
  111. )
  112. prompt_context["current_hour"] = current_hour
  113. prompt_context["if_first_interaction"] = False if dialogue_history else True
  114. last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
  115. prompt_context["if_active_greeting"] = (
  116. False if last_message["role"] == "user" else True
  117. )
  118. current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
  119. "%Y-%m-%d %H:%M:%S"
  120. )
  121. system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
  122. messages = [system_prompt]
  123. if req_data["scene"] == "custom_debugging":
  124. messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
  125. if "头像" in system_prompt["content"]:
  126. messages.append(
  127. {
  128. "role": "user",
  129. "content": [
  130. {
  131. "type": "image_url",
  132. "image_url": {"url": user_profile["avatar"]},
  133. }
  134. ],
  135. }
  136. )
  137. else:
  138. messages.extend(
  139. DialogueManager.compose_chat_messages_openai_compatible(
  140. dialogue_history, current_time_str
  141. )
  142. )
  143. return run_openai_chat(
  144. messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
  145. )
  146. def run_response_type_prompt(req_data):
  147. prompt = req_data["prompt"]
  148. dialogue_history = req_data["dialogue_history"]
  149. model_name = req_data["model_name"]
  150. composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
  151. next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
  152. prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
  153. messages = [
  154. {"role": "system", "content": "你是一个专业的智能助手"},
  155. {"role": "user", "content": prompt},
  156. ]
  157. return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)