guantao 6 часов назад
Родитель
Commit
b9141e37df
5 измененных файлов с 107 добавлено и 9 удалено
  1. 2 0
      agent/llm/__init__.py
  2. 98 0
      agent/llm/qwen.py
  3. 2 2
      examples/plan/config.py
  4. 3 5
      examples/plan/research.prompt
  5. 2 2
      examples/plan/run.py

+ 2 - 0
agent/llm/__init__.py

@@ -7,6 +7,7 @@ LLM Providers
 from .gemini import create_gemini_llm_call
 from .openrouter import create_openrouter_llm_call
 from .yescode import create_yescode_llm_call
+from .qwen import create_qwen_llm_call
 from .usage import TokenUsage, TokenUsageAccumulator, create_usage_from_response
 from .pricing import (
     ModelPricing,
@@ -20,6 +21,7 @@ __all__ = [
     "create_gemini_llm_call",
     "create_openrouter_llm_call",
     "create_yescode_llm_call",
+    "create_qwen_llm_call",
     # Usage
     "TokenUsage",
     "TokenUsageAccumulator",

+ 98 - 0
agent/llm/qwen.py

@@ -0,0 +1,98 @@
+"""
+Qwen (DashScope compatible-mode) LLM provider using the OpenAI SDK.
+"""
+
+import os
+import logging
+from typing import Any, Callable, Dict, List, Optional
+from openai import AsyncOpenAI
+
+# Adjust these imports to match your project structure.
+from .usage import TokenUsage
+from .pricing import PricingCalculator
+
+logger = logging.getLogger(__name__)
+
+# NOTE: if "qwen3.5-plus" returns a 404, test with "qwen-plus" first;
+# Alibaba sometimes requires specific model ID strings in compatible mode.
+DEFAULT_QWEN_MODEL = "qwen-plus" 
+
+def create_qwen_llm_call(
+    model: str = DEFAULT_QWEN_MODEL,
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+) -> Callable:
+    """
+    Create an async Qwen LLM call function backed by the OpenAI SDK.
+    """
+    # Resolve configuration from arguments, falling back to environment vars.
+    # NOTE: when using the OpenAI SDK, base_url must include the /v1 suffix.
+    api_key = api_key or os.getenv("QWEN_API_KEY")
+    base_url = base_url or os.getenv("QWEN_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")
+
+    if not api_key:
+        raise ValueError("QWEN_API_KEY is required")
+
+    # Initialize the async OpenAI client; the SDK appends the
+    # /chat/completions path to base_url automatically.
+    client = AsyncOpenAI(
+        api_key=api_key,
+        base_url=base_url
+    )
+
+    pricing_calc = PricingCalculator()
+
+    async def llm_call(
+        messages: List[Dict[str, Any]],
+        model: str = model,
+        tools: Optional[List[Dict]] = None,
+        temperature: float = 0.7,
+        max_tokens: int = 4096,
+        **kwargs
+    ) -> Dict[str, Any]:
+        
+        try:
+            # NOTE(review): tools=None is forwarded as-is; some compatible-mode
+            # endpoints may reject an explicit null — confirm against DashScope.
+            response = await client.chat.completions.create(
+                model=model,
+                messages=messages,
+                tools=tools,
+                temperature=temperature,
+                max_tokens=max_tokens,
+                **kwargs
+            )
+
+            # Extract text content; coerce None (pure tool-call replies) to "".
+            content = response.choices[0].message.content or ""
+            
+            # --- Key fix ---
+            # Convert the Pydantic tool-call objects into plain dicts so that
+            # runner.py's .get() calls on them do not fail.
+            tool_calls = None
+            if response.choices[0].message.tool_calls:
+                tool_calls = [
+                    tc.model_dump() for tc in response.choices[0].message.tool_calls
+                ]
+            # ------------------
+
+            usage = TokenUsage(
+                input_tokens=response.usage.prompt_tokens,
+                output_tokens=response.usage.completion_tokens,
+            )
+
+            cost = pricing_calc.calculate_cost(model=model, usage=usage)
+
+            return {
+                "content": content,
+                "tool_calls": tool_calls, # now a List[Dict], not Pydantic objects
+                "prompt_tokens": usage.input_tokens,
+                "completion_tokens": usage.output_tokens,
+                "reasoning_tokens": getattr(response.usage, "reasoning_tokens", 0),
+                "finish_reason": response.choices[0].finish_reason,
+                "cost": cost,
+                "usage": usage,
+            }
+
+        except Exception as e:
+            logger.error(f"Qwen SDK Call Failed: {str(e)}")
+            raise
+
+    return llm_call

+ 2 - 2
examples/plan/config.py

@@ -11,7 +11,7 @@ from agent.core.runner import KnowledgeConfig, RunConfig
 
 RUN_CONFIG = RunConfig(
     # 模型配置
-    model="claude-sonnet-4.5",
+    model="qwen3.5-plus",
     temperature=0.3,
     max_iterations=1000,
 
@@ -36,7 +36,7 @@ RUN_CONFIG = RunConfig(
         default_tags={"project": "research", "domain": "ai_agent"},  # 默认 tags(会与工具调用参数合并)
         default_scopes=["org:cybertogether"],  # 默认 scopes
         default_search_types=["strategy", "tool"],  # 默认搜索类型过滤
-        default_search_owner="1746532635@qq.com"  # 默认搜索 owner 过滤(空则不过滤)
+        default_search_owner=""  # 默认搜索 owner 过滤(空则不过滤)
     )
 )
 

+ 3 - 5
examples/plan/research.prompt

@@ -1,5 +1,5 @@
 ---
-model: anthropic/claude-sonnet-4.5
+model: qwen3.5-plus
 temperature: 0.3
 ---
 
@@ -68,7 +68,7 @@ $system$
 
 2. **其次:线上调研**:
    - 知识库中没有或不够用时,再去线上搜索
-   - 从工作流角度(小红书、公众号、知乎):搜索多图一致性生成、人物写真还原等实战工作流
+   - 从工作流角度(小红书、公众号、知乎):根据第一步得到的需求自行决定关键词搜索信息, 主要搜索相关实战工作流
    - 从工具能力角度(GitHub、产品官网、社区):搜索各类工具的能力边界,判断哪些能力可以支撑哪种策略
      - C 端平台(Midjourney、即梦、海螺、Lovart、可灵等)
      - 专业平台和开源项目
@@ -94,9 +94,7 @@ $system$
 
 **开始前**:重新读取 analysis.json 和 research.json。
 
-现在才需要精细地查看具体素材。根据选定的策略,逐个读取:
-- 每张图的制作表(`descriptions/写生油画__img_N_制作表.json`)
-- 每个 features/ 目录的 mapping.json 和具体素材
+现在才需要精细地查看具体素材。根据选定的策略,逐个读取。
 
 目标是将策略实例化:手头有哪些素材可以直接用、策略中的每一步对应到具体的图和特征上。
 

+ 2 - 2
examples/plan/run.py

@@ -36,7 +36,7 @@ from agent.trace import (
     Trace,
     Message,
 )
-from agent.llm import create_openrouter_llm_call
+from agent.llm import create_qwen_llm_call
 from agent.cli import InteractiveController
 from agent.utils import setup_logging
 from agent.tools.builtin.browser.baseClass import init_browser_session, kill_browser_session
@@ -115,7 +115,7 @@ async def main():
     store = FileSystemTraceStore(base_path=TRACE_STORE_PATH)
     runner = AgentRunner(
         trace_store=store,
-        llm_call=create_openrouter_llm_call(model=model_for_llm),
+        llm_call=create_qwen_llm_call(model=model_for_llm),
         skills_dir=SKILLS_DIR,
         debug=DEBUG
     )