| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133 |
- """
- OpenRouter Provider
- 使用 OpenRouter API 调用各种模型(包括 Claude Sonnet 4.5)
- 支持 OpenAI 兼容的 API 格式
- """
- import os
- import json
- import httpx
- from typing import List, Dict, Any, Optional
async def openrouter_llm_call(
    messages: List[Dict[str, Any]],
    model: str = "anthropic/claude-sonnet-4.5",
    tools: Optional[List[Dict]] = None,
    **kwargs
) -> Dict[str, Any]:
    """Call an LLM through the OpenRouter chat-completions API.

    Args:
        messages: OpenAI-format message list.
        model: Model identifier (e.g. "anthropic/claude-sonnet-4.5").
        tools: OpenAI-format tool definitions.
        **kwargs: Optional sampling parameters (temperature, max_tokens).

    Returns:
        {
            "content": str,
            "tool_calls": List[Dict] | None,
            "prompt_tokens": int,
            "completion_tokens": int,
            "finish_reason": str,
            "cost": float
        }

    Raises:
        ValueError: if OPEN_ROUTER_API_KEY is not set.
        httpx.HTTPStatusError: on a non-2xx API response.
    """
    api_key = os.getenv("OPEN_ROUTER_API_KEY")
    if not api_key:
        raise ValueError("OPEN_ROUTER_API_KEY environment variable not set")

    base_url = "https://openrouter.ai/api/v1"
    endpoint = f"{base_url}/chat/completions"

    # Build the request payload; only forward optional parameters that
    # the caller explicitly supplied.
    payload = {
        "model": model,
        "messages": messages,
    }
    if tools:
        payload["tools"] = tools
    if "temperature" in kwargs:
        payload["temperature"] = kwargs["temperature"]
    if "max_tokens" in kwargs:
        payload["max_tokens"] = kwargs["max_tokens"]

    # OpenRouter-specific headers.
    headers = {
        "Authorization": f"Bearer {api_key}",
        "HTTP-Referer": "https://github.com/your-repo",  # optional, used for attribution stats
        "X-Title": "Agent Framework",  # optional, shown in the OpenRouter dashboard
    }

    # Call the API.
    async with httpx.AsyncClient(timeout=120.0) as client:
        try:
            response = await client.post(endpoint, json=payload, headers=headers)
            response.raise_for_status()
            result = response.json()
        except httpx.HTTPStatusError as e:
            error_body = e.response.text
            print(f"[OpenRouter] Error {e.response.status_code}: {error_body}")
            raise
        except Exception as e:
            print(f"[OpenRouter] Request failed: {e}")
            raise

    # Parse the OpenAI-format response.
    choice = result["choices"][0] if result.get("choices") else {}
    message = choice.get("message", {})
    # BUG FIX: the API returns "content": null (-> None) when the model
    # emits only tool calls; coerce to "" so the documented str contract
    # holds for callers.
    content = message.get("content") or ""
    tool_calls = message.get("tool_calls")
    finish_reason = choice.get("finish_reason")  # stop, length, tool_calls, content_filter, ...

    # Extract token usage.
    usage = result.get("usage", {})
    prompt_tokens = usage.get("prompt_tokens", 0)
    completion_tokens = usage.get("completion_tokens", 0)
    # IMPROVEMENT: OpenRouter reports the request cost in usage.cost when
    # usage accounting is enabled; use it when present, else fall back to
    # the previous hard-coded 0.0.
    cost = float(usage.get("cost") or 0.0)

    return {
        "content": content,
        "tool_calls": tool_calls,
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "finish_reason": finish_reason,
        "cost": cost
    }
def create_openrouter_llm_call(
    model: str = "anthropic/claude-sonnet-4.5"
):
    """Build an async LLM-call function bound to an OpenRouter model.

    Args:
        model: Model identifier, e.g.:
            - "anthropic/claude-sonnet-4.5"
            - "anthropic/claude-opus-4.5"
            - "openai/gpt-4o"

    Returns:
        An async callable with the same signature as
        ``openrouter_llm_call``, whose ``model`` argument defaults to the
        bound model name.
    """
    bound_model = model

    async def llm_call(
        messages: List[Dict[str, Any]],
        model: str = bound_model,
        tools: Optional[List[Dict]] = None,
        **kwargs
    ) -> Dict[str, Any]:
        # Delegate to the module-level caller; the closure only supplies
        # the default model.
        return await openrouter_llm_call(messages, model, tools, **kwargs)

    return llm_call
|