utils.py

import json
from datetime import datetime

from flask import jsonify
from openai import OpenAI

from pqai_agent import logging_service, chat_service
from pqai_agent.dialogue_manager import DialogueManager
from pqai_agent.message import MessageType
from pqai_agent.response_type_detector import ResponseTypeDetector
from pqai_agent.user_profile_extractor import UserProfileExtractor

logger = logging_service.logger


def wrap_response(code, msg=None, data=None):
    """Wrap an API result into a standard {code, msg, data} JSON envelope."""
    resp = {"code": code, "msg": msg}
    if code == 200 and not msg:
        resp["msg"] = "success"
    if data:
        resp["data"] = data
    return jsonify(resp)
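
# A minimal usage sketch (the app object and route below are hypothetical, not
# part of this module): a Flask view can return wrap_response directly, e.g.
#
#   @app.route("/api/ping")
#   def ping():
#       return wrap_response(200, data={"pong": True})
#
# which serializes to {"code": 200, "msg": "success", "data": {"pong": true}}.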


def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
    """Convert a dialogue history into OpenAI-style chat messages, without timestamps."""
    messages = []
    for entry in dialogue_history:
        role = entry["role"]
        msg_type = entry.get("type", MessageType.TEXT)
        if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
            if multimodal:
                messages.append(
                    {
                        "role": role,
                        "content": [
                            {
                                "type": "image_url",
                                "image_url": {"url": entry["content"]},
                            }
                        ],
                    }
                )
            else:
                logger.warning("Image in non-multimodal mode")
                # Fall back to a text placeholder ("[图片]" means "[image]").
                messages.append({"role": role, "content": "[图片]"})
        else:
            messages.append({"role": role, "content": str(entry["content"])})
    return messages
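
# Illustrative input/output sketch (field names taken from the code above):
#
#   history = [
#       {"role": "user", "content": "你好", "timestamp": 1700000000000},
#       {"role": "assistant", "content": "你好！", "timestamp": 1700000001000},
#   ]
#   compose_openai_chat_messages_no_time(history)
#   # -> [{"role": "user", "content": "你好"},
#   #     {"role": "assistant", "content": "你好！"}]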


def run_openai_chat(messages, model_name, **kwargs):
    """Dispatch a chat completion to the provider endpoint that serves model_name."""
    volcengine_models = [
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
        chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
    ]
    deepseek_models = [
        chat_service.DEEPSEEK_CHAT_MODEL,
    ]
    volcengine_bots = [
        chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
    ]
    if model_name in volcengine_models:
        llm_client = OpenAI(
            api_key=chat_service.VOLCENGINE_API_TOKEN,
            base_url=chat_service.VOLCENGINE_BASE_URL,
        )
    elif model_name in volcengine_bots:
        llm_client = OpenAI(
            api_key=chat_service.VOLCENGINE_API_TOKEN,
            base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
        )
    elif model_name in deepseek_models:
        llm_client = OpenAI(
            api_key=chat_service.DEEPSEEK_API_TOKEN,
            base_url=chat_service.DEEPSEEK_BASE_URL,
        )
    else:
        raise ValueError(f"model not supported: {model_name}")
    response = llm_client.chat.completions.create(
        messages=messages, model=model_name, **kwargs
    )
    logger.debug(response)
    return response
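
# Usage sketch (the model constant is one of those registered above; the
# return value follows the OpenAI chat-completions response shape):
#
#   resp = run_openai_chat(
#       [{"role": "user", "content": "ping"}],
#       chat_service.DEEPSEEK_CHAT_MODEL,
#       temperature=0.3,
#   )
#   print(resp.choices[0].message.content)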


def run_extractor_prompt(req_data):
    """Run the user-profile extraction prompt and return the extracted fields."""
    prompt = req_data["prompt"]
    user_profile = req_data["user_profile"]
    staff_profile = req_data["staff_profile"]
    dialogue_history = req_data["dialogue_history"]
    model_name = req_data["model_name"]
    prompt_context = {
        **staff_profile,
        **user_profile,
        "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
    }
    prompt = prompt.format(**prompt_context)
    messages = [
        # System prompt: "You are a professional user-profile analysis assistant."
        {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
        {"role": "user", "content": prompt},
    ]
    tools = [UserProfileExtractor.get_extraction_function()]
    response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
    tool_calls = response.choices[0].message.tool_calls
    if not tool_calls:
        return {}
    function_call = tool_calls[0]
    if function_call.function.name != "update_user_profile":
        logger.error("llm does not return update_user_profile")
        return {}
    profile_info = json.loads(function_call.function.arguments)
    # Drop empty values so only fields the model actually extracted are returned.
    return {k: v for k, v in profile_info.items() if v}
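
# Sketch of the expected req_data payload (the keys are exactly the ones read
# above; the values here are placeholders only):
#
#   req_data = {
#       "prompt": "...{dialogue_history}...",
#       "user_profile": {...},
#       "staff_profile": {...},
#       "dialogue_history": [...],
#       "model_name": chat_service.DEEPSEEK_CHAT_MODEL,
#   }
#   profile_delta = run_extractor_prompt(req_data)  # dict of non-empty fields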


def run_chat_prompt(req_data):
    """Build the chat prompt context for the scene and run a chat completion."""
    prompt = req_data["prompt"]
    staff_profile = req_data.get("staff_profile", {})
    user_profile = req_data.get("user_profile", {})
    dialogue_history = req_data.get("dialogue_history", [])
    model_name = req_data["model_name"]
    # current_timestamp arrives in milliseconds; convert to seconds.
    current_timestamp = req_data["current_timestamp"] / 1000
    prompt_context = {**staff_profile, **user_profile}
    current_hour = datetime.fromtimestamp(current_timestamp).hour
    prompt_context["last_interaction_interval"] = 0
    prompt_context["current_time_period"] = DialogueManager.get_time_context(
        current_hour
    )
    prompt_context["current_hour"] = current_hour
    prompt_context["if_first_interaction"] = not dialogue_history
    last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
    # It is an active greeting when the last message did not come from the user.
    prompt_context["if_active_greeting"] = last_message["role"] != "user"
    current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
    messages = [system_prompt]
    if req_data["scene"] == "custom_debugging":
        messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
        # "头像" means "avatar": if the prompt mentions it, attach the avatar image.
        if "头像" in system_prompt["content"]:
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": user_profile["avatar"]},
                        }
                    ],
                }
            )
    else:
        messages.extend(
            DialogueManager.compose_chat_messages_openai_compatible(
                dialogue_history, current_time_str
            )
        )
    return run_openai_chat(
        messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
    )
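
# Call sketch: "scene" selects the debugging path (messages without timestamps)
# versus the normal path, and current_timestamp is expected in milliseconds.
#
#   req_data = {
#       "prompt": "...",
#       "model_name": chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
#       "current_timestamp": int(datetime.now().timestamp() * 1000),
#       "scene": "custom_debugging",
#       "dialogue_history": [],
#   }
#   resp = run_chat_prompt(req_data)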


def run_response_type_prompt(req_data):
    """Classify the expected response type for the latest message in the dialogue."""
    prompt = req_data["prompt"]
    dialogue_history = req_data["dialogue_history"]
    model_name = req_data["model_name"]
    # All but the last message form the context; the last one is the target.
    composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
    next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
    prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
    messages = [
        # System prompt: "You are a professional intelligent assistant."
        {"role": "system", "content": "你是一个专业的智能助手"},
        {"role": "user", "content": prompt},
    ]
    return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
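
# Note: dialogue_history must contain at least one entry, since the final
# message is split off as the classification target. Hypothetical call sketch:
#
#   run_response_type_prompt({
#       "prompt": "...{dialogue_history}...{message}...",
#       "dialogue_history": [{"role": "user", "content": "在吗？"}],
#       "model_name": chat_service.DEEPSEEK_CHAT_MODEL,
#   })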