
1. Replace customer_id with user_id in mysql_session_manager.py
2. Split the methods in utils/__init__.py into separate modules

luojunhui · 3 weeks ago · commit 90fbe26663

+ 9 - 9
pqai_agent_server/models/mysql_session_manager.py

@@ -35,7 +35,7 @@ class SessionManager(abc.ABC):
 
     @abc.abstractmethod
     def get_conversation_list(
-        self, staff_id: str, customer_id: str, page: Optional[int], page_size: int
+        self, staff_id: str, user_id: str, page: Optional[int], page_size: int
     ) -> Dict:
         pass
 
@@ -194,8 +194,8 @@ class MySQLSessionManager(SessionManager):
             else:
                 temp_obj["message"] = last_message[0]["content"]
                 temp_obj["timestamp"] = last_message[0]["max_timestamp"]
-            temp_obj["customer_id"] = user_id
-            temp_obj["customer_name"] = session["name"]
+            temp_obj["user_id"] = user_id
+            temp_obj["user_name"] = session["name"]
             temp_obj["avatar"] = session["iconurl"]
             response_data.append(temp_obj)
         return {
@@ -206,16 +206,16 @@ class MySQLSessionManager(SessionManager):
         }
 
     def get_conversation_list(
-        self, staff_id: str, customer_id: str, page: Optional[int], page_size: int
+        self, staff_id: str, user_id: str, page: Optional[int], page_size: int
     ):
         """
         :param page_size:
         :param staff_id:
-        :param customer_id:
+        :param user_id:
         :param page: timestamp
         :return:
         """
-        room_id = ":".join(["private", staff_id, customer_id])
+        room_id = ":".join(["private", staff_id, user_id])
         if not page:
             fetch_query = f"""
                 select t1.sender, t2.name, t1.sendtime, t1.content, t2.iconurl
@@ -258,13 +258,13 @@ class MySQLSessionManager(SessionManager):
                     "avatar": message["iconurl"],
                     "content": message["content"],
                     "timestamp": message["sendtime"],
-                    "role": "customer" if message["sender"] == customer_id else "staff",
+                    "role": "user" if message["sender"] == user_id else "staff",
                 }
                 for message in messages
             ]
             return {
                 "staff_id": staff_id,
-                "customer_id": customer_id,
+                "user_id": user_id,
                 "has_next_page": has_next_page,
                 "next_page": next_page,
                 "data": response_data,
@@ -274,7 +274,7 @@ class MySQLSessionManager(SessionManager):
             next_page = None
             return {
                 "staff_id": staff_id,
-                "customer_id": customer_id,
+                "user_id": user_id,
                 "has_next_page": has_next_page,
                 "next_page": next_page,
                 "data": [],

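The rename is mechanical: every customer_id parameter, response key, and role label becomes user_id / user. A minimal self-contained sketch of the room-id composition the query relies on (the helper name below is hypothetical; in the real method the expression is inlined in get_conversation_list):

def build_private_room_id(staff_id: str, user_id: str) -> str:
    # Mirrors the inlined expression in get_conversation_list after the rename.
    return ":".join(["private", staff_id, user_id])

assert build_private_room_id("staff_001", "user_042") == "private:staff_001:user_042"
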
+ 10 - 191
pqai_agent_server/utils/__init__.py

@@ -1,191 +1,10 @@
-import json
-
-import requests
-from flask import jsonify
-from datetime import datetime
-
-from openai import OpenAI
-
-from pqai_agent import logging_service, chat_service
-from pqai_agent.dialogue_manager import DialogueManager
-from pqai_agent.message import MessageType
-from pqai_agent.response_type_detector import ResponseTypeDetector
-from pqai_agent.user_profile_extractor import UserProfileExtractor
-
-logger = logging_service.logger
-
-
-def wrap_response(code, msg=None, data=None):
-    resp = {"code": code, "msg": msg}
-    if code == 200 and not msg:
-        resp["msg"] = "success"
-    if data:
-        resp["data"] = data
-    return jsonify(resp)
-
-
-def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
-    messages = []
-    for entry in dialogue_history:
-        role = entry["role"]
-        msg_type = entry.get("type", MessageType.TEXT)
-        fmt_time = DialogueManager.format_timestamp(entry["timestamp"])
-        if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
-            if multimodal:
-                messages.append(
-                    {
-                        "role": role,
-                        "content": [
-                            {
-                                "type": "image_url",
-                                "image_url": {"url": entry["content"]},
-                            }
-                        ],
-                    }
-                )
-            else:
-                logger.warning("Image in non-multimodal mode")
-                messages.append({"role": role, "content": "[图片]"})
-        else:
-            messages.append({"role": role, "content": f'{entry["content"]}'})
-    return messages
-
-
-def run_openai_chat(messages, model_name, **kwargs):
-    volcengine_models = [
-        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
-        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
-        chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
-        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
-    ]
-    deepseek_models = [
-        chat_service.DEEPSEEK_CHAT_MODEL,
-    ]
-    volcengine_bots = [
-        chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
-    ]
-    if model_name in volcengine_models:
-        llm_client = OpenAI(
-            api_key=chat_service.VOLCENGINE_API_TOKEN,
-            base_url=chat_service.VOLCENGINE_BASE_URL,
-        )
-    elif model_name in volcengine_bots:
-        llm_client = OpenAI(
-            api_key=chat_service.VOLCENGINE_API_TOKEN,
-            base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
-        )
-    elif model_name in deepseek_models:
-        llm_client = OpenAI(
-            api_key=chat_service.DEEPSEEK_API_TOKEN,
-            base_url=chat_service.DEEPSEEK_BASE_URL,
-        )
-    else:
-        raise Exception("model not supported")
-    response = llm_client.chat.completions.create(
-        messages=messages, model=model_name, **kwargs
-    )
-    logger.debug(response)
-    return response
-
-
-def run_extractor_prompt(req_data):
-    prompt = req_data["prompt"]
-    user_profile = req_data["user_profile"]
-    staff_profile = req_data["staff_profile"]
-    dialogue_history = req_data["dialogue_history"]
-    model_name = req_data["model_name"]
-    prompt_context = {
-        **staff_profile,
-        **user_profile,
-        "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
-    }
-    prompt = prompt.format(**prompt_context)
-    messages = [
-        {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
-        {"role": "user", "content": prompt},
-    ]
-    tools = [UserProfileExtractor.get_extraction_function()]
-    response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
-    tool_calls = response.choices[0].message.tool_calls
-    if tool_calls:
-        function_call = tool_calls[0]
-        if function_call.function.name == "update_user_profile":
-            profile_info = json.loads(function_call.function.arguments)
-            return {k: v for k, v in profile_info.items() if v}
-        else:
-            logger.error("llm does not return update_user_profile")
-            return {}
-    else:
-        return {}
-
-
-def run_chat_prompt(req_data):
-    prompt = req_data["prompt"]
-    staff_profile = req_data.get("staff_profile", {})
-    user_profile = req_data.get("user_profile", {})
-    dialogue_history = req_data.get("dialogue_history", [])
-    model_name = req_data["model_name"]
-    current_timestamp = req_data["current_timestamp"] / 1000
-    prompt_context = {**staff_profile, **user_profile}
-    current_hour = datetime.fromtimestamp(current_timestamp).hour
-    prompt_context["last_interaction_interval"] = 0
-    prompt_context["current_time_period"] = DialogueManager.get_time_context(
-        current_hour
-    )
-    prompt_context["current_hour"] = current_hour
-    prompt_context["if_first_interaction"] = False if dialogue_history else True
-    last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
-    prompt_context["if_active_greeting"] = (
-        False if last_message["role"] == "user" else True
-    )
-
-    current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
-        "%Y-%m-%d %H:%M:%S"
-    )
-    system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
-    messages = [system_prompt]
-    if req_data["scene"] == "custom_debugging":
-        messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
-        if "头像" in system_prompt["content"]:
-            messages.append(
-                {
-                    "role": "user",
-                    "content": [
-                        {
-                            "type": "image_url",
-                            "image_url": {"url": user_profile["avatar"]},
-                        }
-                    ],
-                }
-            )
-    else:
-        messages.extend(
-            DialogueManager.compose_chat_messages_openai_compatible(
-                dialogue_history, current_time_str
-            )
-        )
-    return run_openai_chat(
-        messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
-    )
-
-
-def run_response_type_prompt(req_data):
-    prompt = req_data["prompt"]
-    dialogue_history = req_data["dialogue_history"]
-    model_name = req_data["model_name"]
-
-    composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
-    next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
-    prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
-    messages = [
-        {"role": "system", "content": "你是一个专业的智能助手"},
-        {"role": "user", "content": prompt},
-    ]
-    return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
-
-
-def quit_human_intervention_status(user_id, staff_id):
-    url = f"http://ai-wechat-hook-internal.piaoquantv.com/manage/insertEvent?sender={user_id}&receiver={staff_id}&type=103&content=SYSTEM"
-    response = requests.get(url, timeout=20)
-    return response.json()
-
+from .common import wrap_response
+from .common import quit_human_intervention_status
+
+from .prompt_util import (
+    run_openai_chat,
+    run_chat_prompt,
+    run_extractor_prompt,
+    run_response_type_prompt,
+    compose_openai_chat_messages_no_time,
+)

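The split leaves the package's public surface unchanged: __init__.py now only re-exports from the two new modules. A quick compatibility sketch, assuming pqai_agent_server is importable:

# Both import paths resolve to the same function object, so existing
# call sites that import from the package root keep working.
from pqai_agent_server.utils import wrap_response
from pqai_agent_server.utils.common import wrap_response as wrap_response_direct

assert wrap_response is wrap_response_direct
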
+ 17 - 0
pqai_agent_server/utils/common.py

@@ -0,0 +1,17 @@
+import requests
+from flask import jsonify
+
+
+def wrap_response(code, msg=None, data=None):
+    resp = {"code": code, "msg": msg}
+    if code == 200 and not msg:
+        resp["msg"] = "success"
+    if data:
+        resp["data"] = data
+    return jsonify(resp)
+
+
+def quit_human_intervention_status(user_id, staff_id):
+    url = f"http://ai-wechat-hook-internal.piaoquantv.com/manage/insertEvent?sender={user_id}&receiver={staff_id}&type=103&content=SYSTEM"
+    response = requests.get(url, timeout=20)
+    return response.json()

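common.py keeps the two Flask/HTTP helpers. Since jsonify() requires an application context, a usage sketch looks like this (the Flask app below is a stand-in for illustration, not part of the commit):

from flask import Flask

from pqai_agent_server.utils.common import wrap_response

app = Flask(__name__)  # hypothetical app, only to provide an app context

with app.app_context():
    resp = wrap_response(200, data={"user_id": "user_042"})
    print(resp.get_json())  # {"code": 200, "data": {...}, "msg": "success"}
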
+ 172 - 0
pqai_agent_server/utils/prompt_util.py

@@ -0,0 +1,172 @@
+import json
+
+from datetime import datetime
+from openai import OpenAI
+
+from pqai_agent import logging_service, chat_service
+from pqai_agent.response_type_detector import ResponseTypeDetector
+from pqai_agent.user_profile_extractor import UserProfileExtractor
+from pqai_agent.dialogue_manager import DialogueManager
+from pqai_agent.message import MessageType
+
+logger = logging_service.logger
+
+
+def compose_openai_chat_messages_no_time(dialogue_history, multimodal=False):
+    messages = []
+    for entry in dialogue_history:
+        role = entry["role"]
+        msg_type = entry.get("type", MessageType.TEXT)
+        fmt_time = DialogueManager.format_timestamp(entry["timestamp"])
+        if msg_type in (MessageType.IMAGE_GW, MessageType.IMAGE_QW, MessageType.GIF):
+            if multimodal:
+                messages.append(
+                    {
+                        "role": role,
+                        "content": [
+                            {
+                                "type": "image_url",
+                                "image_url": {"url": entry["content"]},
+                            }
+                        ],
+                    }
+                )
+            else:
+                logger.warning("Image in non-multimodal mode")
+                messages.append({"role": role, "content": "[图片]"})
+        else:
+            messages.append({"role": role, "content": f'{entry["content"]}'})
+    return messages
+
+
+def run_openai_chat(messages, model_name, **kwargs):
+    volcengine_models = [
+        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_32K,
+        chat_service.VOLCENGINE_MODEL_DOUBAO_PRO_1_5,
+        chat_service.VOLCENGINE_MODEL_DOUBAO_1_5_VISION_PRO,
+        chat_service.VOLCENGINE_MODEL_DEEPSEEK_V3,
+    ]
+    deepseek_models = [
+        chat_service.DEEPSEEK_CHAT_MODEL,
+    ]
+    volcengine_bots = [
+        chat_service.VOLCENGINE_BOT_DEEPSEEK_V3_SEARCH,
+    ]
+    if model_name in volcengine_models:
+        llm_client = OpenAI(
+            api_key=chat_service.VOLCENGINE_API_TOKEN,
+            base_url=chat_service.VOLCENGINE_BASE_URL,
+        )
+    elif model_name in volcengine_bots:
+        llm_client = OpenAI(
+            api_key=chat_service.VOLCENGINE_API_TOKEN,
+            base_url=chat_service.VOLCENGINE_BOT_BASE_URL,
+        )
+    elif model_name in deepseek_models:
+        llm_client = OpenAI(
+            api_key=chat_service.DEEPSEEK_API_TOKEN,
+            base_url=chat_service.DEEPSEEK_BASE_URL,
+        )
+    else:
+        raise Exception("model not supported")
+    response = llm_client.chat.completions.create(
+        messages=messages, model=model_name, **kwargs
+    )
+    logger.debug(response)
+    return response
+
+
+def run_extractor_prompt(req_data):
+    prompt = req_data["prompt"]
+    user_profile = req_data["user_profile"]
+    staff_profile = req_data["staff_profile"]
+    dialogue_history = req_data["dialogue_history"]
+    model_name = req_data["model_name"]
+    prompt_context = {
+        **staff_profile,
+        **user_profile,
+        "dialogue_history": UserProfileExtractor.compose_dialogue(dialogue_history),
+    }
+    prompt = prompt.format(**prompt_context)
+    messages = [
+        {"role": "system", "content": "你是一个专业的用户画像分析助手。"},
+        {"role": "user", "content": prompt},
+    ]
+    tools = [UserProfileExtractor.get_extraction_function()]
+    response = run_openai_chat(messages, model_name, tools=tools, temperature=0)
+    tool_calls = response.choices[0].message.tool_calls
+    if tool_calls:
+        function_call = tool_calls[0]
+        if function_call.function.name == "update_user_profile":
+            profile_info = json.loads(function_call.function.arguments)
+            return {k: v for k, v in profile_info.items() if v}
+        else:
+            logger.error("llm does not return update_user_profile")
+            return {}
+    else:
+        return {}
+
+
+def run_chat_prompt(req_data):
+    prompt = req_data["prompt"]
+    staff_profile = req_data.get("staff_profile", {})
+    user_profile = req_data.get("user_profile", {})
+    dialogue_history = req_data.get("dialogue_history", [])
+    model_name = req_data["model_name"]
+    current_timestamp = req_data["current_timestamp"] / 1000
+    prompt_context = {**staff_profile, **user_profile}
+    current_hour = datetime.fromtimestamp(current_timestamp).hour
+    prompt_context["last_interaction_interval"] = 0
+    prompt_context["current_time_period"] = DialogueManager.get_time_context(
+        current_hour
+    )
+    prompt_context["current_hour"] = current_hour
+    prompt_context["if_first_interaction"] = False if dialogue_history else True
+    last_message = dialogue_history[-1] if dialogue_history else {"role": "assistant"}
+    prompt_context["if_active_greeting"] = (
+        False if last_message["role"] == "user" else True
+    )
+
+    current_time_str = datetime.fromtimestamp(current_timestamp).strftime(
+        "%Y-%m-%d %H:%M:%S"
+    )
+    system_prompt = {"role": "system", "content": prompt.format(**prompt_context)}
+    messages = [system_prompt]
+    if req_data["scene"] == "custom_debugging":
+        messages.extend(compose_openai_chat_messages_no_time(dialogue_history))
+        if "头像" in system_prompt["content"]:
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": user_profile["avatar"]},
+                        }
+                    ],
+                }
+            )
+    else:
+        messages.extend(
+            DialogueManager.compose_chat_messages_openai_compatible(
+                dialogue_history, current_time_str
+            )
+        )
+    return run_openai_chat(
+        messages, model_name, temperature=1, top_p=0.7, max_tokens=1024
+    )
+
+
+def run_response_type_prompt(req_data):
+    prompt = req_data["prompt"]
+    dialogue_history = req_data["dialogue_history"]
+    model_name = req_data["model_name"]
+
+    composed_dialogue = ResponseTypeDetector.compose_dialogue(dialogue_history[:-1])
+    next_message = DialogueManager.format_dialogue_content(dialogue_history[-1])
+    prompt = prompt.format(dialogue_history=composed_dialogue, message=next_message)
+    messages = [
+        {"role": "system", "content": "你是一个专业的智能助手"},
+        {"role": "user", "content": prompt},
+    ]
+    return run_openai_chat(messages, model_name, temperature=0.2, max_tokens=128)
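
prompt_util.py now owns the LLM-facing helpers. For reference, an illustrative req_data payload for run_chat_prompt; every field value below is invented, and model_name must be one of the chat_service constants routed in run_openai_chat:

req_data = {
    # Format placeholders resolve from staff_profile/user_profile plus the
    # keys run_chat_prompt injects (current_time_period, current_hour, ...).
    "prompt": "你是{name}，现在是{current_time_period}。",
    "staff_profile": {"name": "小助手"},
    "user_profile": {"avatar": "https://example.com/avatar.png"},
    "dialogue_history": [
        {"role": "user", "content": "你好", "timestamp": 1717000000000},
    ],
    "model_name": "<one of the chat_service model constants>",
    "current_timestamp": 1717000300000,  # milliseconds; divided by 1000 internally
    "scene": "custom_debugging",
}
# response = run_chat_prompt(req_data)  # requires valid model credentials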