# __init__.py  (line-number gutter and file-listing residue removed)
  1. import json
  2. import requests
  3. from flask import jsonify
  4. from datetime import datetime
  5. from openai import OpenAI
  6. from pqai_agent import logging_service, chat_service
  7. from pqai_agent.dialogue_manager import DialogueManager
  8. from pqai_agent.message import MessageType
  9. from pqai_agent.response_type_detector import ResponseTypeDetector
  10. from pqai_agent.user_profile_extractor import UserProfileExtractor
  11. logger = logging_service.logger
  12. def wrap_response(code, msg=None, data=None):
  13. resp = {"code": code, "msg": msg}
  14. if code == 200 and not msg:
  15. resp["msg"] = "success"
  16. if data:
  17. resp["data"] = data
  18. return jsonify(resp)
  19. def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
  20. messages = []
  21. for entry in dialogue_history:
  22. role = entry["role"]
  23. msg_type = entry.get("type", MessageType.TEXT)
  24. fmt_time = DialogueManager.format_timestamp(entry["timestamp"])
  25. if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
  26. if multimodal:
  27. messages.append(
  28. {
  29. "role": role,
  30. "content": [
  31. {
  32. "type": "image_url",
  33. "image_url": {"url": entry["content"]},
  34. }
  35. ],
  36. }
  37. )
  38. else:
  39. logger.warning("Image in non-multimodal mode")
  40. messages.append({"role": role, "content": "[图片]"})
  41. else:
  42. messages.append({"role": role, "content": f'{entry["content"]}'})
  43. return messages
  44. def run_openai_chat(messages, model_name, **kwargs):
  45. volcengine_models = [
  46. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
  47. chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
  48. chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
  49. chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
  50. ]
  51. deepseek_models = [
  52. chat_service.DEEPSEEK_CHAT_MODEL,
  53. ]
  54. volcengine_bots = [
  55. chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
  56. ]
  57. if model_name in volcengine_models:
  58. llm_client = OpenAI(
  59. api_key=chat_service.VOLCENGINE_API_TOKEN,
  60. base_url=chat_service.VOLCENGINE_BASE_URL,
  61. )
  62. elif model_name in volcengine_bots:
  63. llm_client = OpenAI(
  64. api_key=chat_service.VOLCENGINE_API_TOKEN,
  65. base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
  66. )
  67. elif model_name in deepseek_models:
  68. llm_client = OpenAI(
  69. api_key=chat_service.DEEPSEEK_API_TOKEN,
  70. base_url=chat_service.DEEPSEEK_BASE_URL,
  71. )
  72. else:
  73. raise Exception("model not supported")
  74. response = llm_client.chat.completions.create(
  75. messages=messages, model=model_name, **kwargs
  76. )
  77. logger.debug(response)
  78. return response
  79. def run_extractor_prompt(req_data):
  80. prompt = req_data["prompt"]
  81. user_profile = req_data["user_profile"]
  82. staff_profile = req_data["staff_profile"]
  83. dialogue_history = req_data["dialogue_history"]
  84. model_name = req_data["model_name"]
  85. prompt_context = {
  86. **staff_profile,
  87. **user_profile,
  88. "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
  89. }
  90. prompt = prompt.format(**prompt_context)
  91. messages = [
  92. {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
  93. {"role": "user", "content": prompt},
  94. ]
  95. tools = [UserProfileExtractor.get_extraction_function()]
  96. response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
  97. tool_calls = response.choices[0].message.tool_calls
  98. if tool_calls:
  99. function_call = tool_calls[0]
  100. if function_call.function.name == "update_user_profile":
  101. profile_info = json.loads(function_call.function.arguments)
  102. return {k: v for k, v in profile_info.items() if v}
  103. else:
  104. logger.error("llm does not return update_user_profile")
  105. return {}
  106. else:
  107. return {}
  108. def run_chat_prompt(req_data):
  109. prompt = req_data["prompt"]
  110. staff_profile = req_data.get("staff_profile", {})
  111. user_profile = req_data.get("user_profile", {})
  112. dialogue_history = req_data.get("dialogue_history", [])
  113. model_name = req_data["model_name"]
  114. current_timestamp = req_data["current_timestamp"] / 1000
  115. prompt_context = {**staff_profile, **user_profile}
  116. current_hour = datetime.fromtimestamp(current_timestamp).hour
  117. prompt_context["last_interaction_interval"] = 0
  118. prompt_context["current_time_period"] = DialogueManager.get_time_context(
  119. current_hour
  120. )
  121. prompt_context["current_hour"] = current_hour
  122. prompt_context["if_first_interaction"] = False if dialogue_history else True
  123. last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
  124. prompt_context["if_active_greeting"] = (
  125. False if last_message["role"] == "user" else True
  126. )
  127. current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
  128. "%Y-%m-%d %H:%M:%S"
  129. )
  130. system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
  131. messages = [system_prompt]
  132. if req_data["scene"] == "custom_debugging":
  133. messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
  134. if "头像" in system_prompt["content"]:
  135. messages.append(
  136. {
  137. "role": "user",
  138. "content": [
  139. {
  140. "type": "image_url",
  141. "image_url": {"url": user_profile["avatar"]},
  142. }
  143. ],
  144. }
  145. )
  146. else:
  147. messages.extend(
  148. DialogueManager.compose_chat_messages_openai_compatible(
  149. dialogue_history, current_time_str
  150. )
  151. )
  152. return run_openai_chat(
  153. messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
  154. )
  155. def run_response_type_prompt(req_data):
  156. prompt = req_data["prompt"]
  157. dialogue_history = req_data["dialogue_history"]
  158. model_name = req_data["model_name"]
  159. composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
  160. next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
  161. prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
  162. messages = [
  163. {"role": "system", "content": "你是一个专业的智能助手"},
  164. {"role": "user", "content": prompt},
  165. ]
  166. return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
  167. def quit_human_intervention_status(user_id, staff_id):
  168. url = f"http://ai-wechat-hook-internal.piaoquantv.com/manage/insertEvent?sender={user_id}&receiver={staff_id}&type=103&content=SYSTEM"
  169. response = requests.get(url, timeout=20)
  170. return response.json()