Przeglądaj źródła

Merge branch 'main' into main

TAKO 11 miesięcy temu
rodzic
commit
8e68bcce29
69 zmienionych plików z 2317 dodań i 1551 usunięć
  1. 5 5
      .github/workflows/docker-image-amd64.yml
  2. 7 8
      .github/workflows/docker-image-arm64.yml
  3. 91 130
      README.md
  4. 2 0
      common/constants.go
  5. 1 1
      common/custom-event.go
  6. 1 1
      controller/channel-test.go
  7. 51 0
      controller/relay.go
  8. 212 0
      dto/claude.go
  9. 14 11
      dto/openai_response.go
  10. 5 4
      dto/realtime.go
  11. 1 1
      go.mod
  12. 8 0
      middleware/auth.go
  13. 6 5
      model/main.go
  14. 2 1
      relay/channel/adapter.go
  15. 8 2
      relay/channel/ali/adaptor.go
  16. 5 7
      relay/channel/ali/image.go
  17. 4 0
      relay/channel/api_request.go
  18. 6 2
      relay/channel/aws/adaptor.go
  19. 13 13
      relay/channel/aws/dto.go
  20. 31 34
      relay/channel/aws/relay-aws.go
  21. 7 1
      relay/channel/baidu/adaptor.go
  22. 8 2
      relay/channel/baidu_v2/adaptor.go
  23. 5 1
      relay/channel/claude/adaptor.go
  24. 93 92
      relay/channel/claude/dto.go
  25. 173 86
      relay/channel/claude/relay-claude.go
  26. 7 1
      relay/channel/cloudflare/adaptor.go
  27. 7 2
      relay/channel/cohere/adaptor.go
  28. 8 2
      relay/channel/deepseek/adaptor.go
  29. 17 11
      relay/channel/dify/adaptor.go
  30. 7 1
      relay/channel/gemini/adaptor.go
  31. 11 3
      relay/channel/jina/adaptor.go
  32. 0 59
      relay/channel/jina/relay-jina.go
  33. 8 2
      relay/channel/mistral/adaptor.go
  34. 9 3
      relay/channel/mokaai/adaptor.go
  35. 8 2
      relay/channel/ollama/adaptor.go
  36. 49 9
      relay/channel/openai/adaptor.go
  37. 188 0
      relay/channel/openai/helper.go
  38. 15 94
      relay/channel/openai/relay-openai.go
  39. 0 74
      relay/channel/openrouter/adaptor.go
  40. 7 2
      relay/channel/palm/adaptor.go
  41. 8 3
      relay/channel/perplexity/adaptor.go
  42. 10 4
      relay/channel/siliconflow/adaptor.go
  43. 7 2
      relay/channel/tencent/adaptor.go
  44. 5 2
      relay/channel/vertex/adaptor.go
  45. 14 14
      relay/channel/vertex/dto.go
  46. 9 3
      relay/channel/volcengine/adaptor.go
  47. 7 0
      relay/channel/xinference/constant.go
  48. 7 2
      relay/channel/xunfei/adaptor.go
  49. 7 2
      relay/channel/zhipu/adaptor.go
  50. 8 3
      relay/channel/zhipu_4v/adaptor.go
  51. 163 0
      relay/claude_handler.go
  52. 30 0
      relay/common/relay_info.go
  53. 35 0
      relay/common_handler/rerank.go
  54. 3 0
      relay/constant/api_type.go
  55. 24 0
      relay/helper/common.go
  56. 4 0
      relay/helper/price.go
  57. 4 1
      relay/relay-text.go
  58. 3 4
      relay/relay_adaptor.go
  59. 1 0
      router/relay-router.go
  60. 351 0
      service/convert.go
  61. 24 0
      service/error.go
  62. 9 0
      service/log_info_generate.go
  63. 69 0
      service/quota.go
  64. 105 0
      service/token_counter.go
  65. 42 29
      setting/operation_setting/cache_ratio.go
  66. 66 13
      web/src/components/LogsTable.js
  67. 0 790
      web/src/components/SafetySetting.js
  68. 9 3
      web/src/constants/channel.constants.js
  69. 193 4
      web/src/helpers/render.js

+ 5 - 5
.github/workflows/docker-image-amd64.yml

@@ -18,20 +18,20 @@ jobs:
       contents: read
     steps:
       - name: Check out the repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Save version info
         run: |
           git describe --tags > VERSION 
 
       - name: Log in to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Log in to the Container registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -39,14 +39,14 @@ jobs:
 
       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: |
             calciumion/new-api
             ghcr.io/${{ github.repository }}
 
       - name: Build and push Docker images
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
         with:
           context: .
           push: true

+ 7 - 8
.github/workflows/docker-image-arm64.yml

@@ -4,7 +4,6 @@ on:
   push:
     tags:
       - '*'
-      - '!*-alpha*'
   workflow_dispatch:
     inputs:
       name:
@@ -19,26 +18,26 @@ jobs:
       contents: read
     steps:
       - name: Check out the repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Save version info
         run: |
           git describe --tags > VERSION 
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
 
       - name: Log in to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Log in to the Container registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -46,14 +45,14 @@ jobs:
 
       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: |
             calciumion/new-api
             ghcr.io/${{ github.repository }}
 
       - name: Build and push Docker images
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
         with:
           context: .
           platforms: linux/amd64,linux/arm64

+ 91 - 130
README.md

@@ -7,7 +7,6 @@
 
 # New API
 
-
 🍥新一代大模型网关与AI资产管理系统
 
 <a href="https://trendshift.io/repositories/8227" target="_blank"><img src="https://trendshift.io/api/badge/repositories/8227" alt="Calcium-Ion%2Fnew-api | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
@@ -41,39 +40,40 @@
 > - 本项目仅供个人学习使用,不保证稳定性,且不提供任何技术支持。
 > - 根据[《生成式人工智能服务管理暂行办法》](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm)的要求,请勿对中国地区公众提供一切未经备案的生成式人工智能服务。
 
+## 📚 文档
+
+详细文档请访问我们的官方Wiki:[https://docs.newapi.pro/](https://docs.newapi.pro/)
+
 ## ✨ 主要特性
 
-1. 🎨 全新的UI界面(部分界面还待更新)
-2. 🌍 多语言支持(待完善)
-3. 🎨 添加[Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy)接口支持,[对接文档](Midjourney.md)
-4. 💰 支持在线充值功能,可在系统设置中设置:
-    - [x] 易支付
-5. 🔍 支持用key查询使用额度:
-    - 配合项目[neko-api-key-tool](https://github.com/Calcium-Ion/neko-api-key-tool)可实现用key查询使用
+New API提供了丰富的功能,详细特性请参考[维基-特性说明](https://docs.newapi.pro/wiki/features-introduction):
+
+1. 🎨 全新的UI界面
+2. 🌍 多语言支持
+3. 🎨 支持[Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy)接口,[对接文档](https://docs.newapi.pro/api/relay/image/midjourney)
+4. 💰 支持在线充值功能(易支付)
+5. 🔍 支持用key查询使用额度(配合[neko-api-key-tool](https://github.com/Calcium-Ion/neko-api-key-tool))
 6. 📑 分页支持选择每页显示数量
-7. 🔄 兼容原版One API的数据库,可直接使用原版数据库(one-api.db)
-8. 💵 支持模型按次数收费,可在 系统设置-运营设置 中设置
-9. ⚖️ 支持渠道**加权随机**
+7. 🔄 兼容原版One API的数据库
+8. 💵 支持模型按次数收费
+9. ⚖️ 支持渠道加权随机
 10. 📈 数据看板(控制台)
 11. 🔒 可设置令牌能调用的模型
-12. 🤖 支持Telegram授权登录:
-    1. 系统设置-配置登录注册-允许通过Telegram登录
-    2. 对[@Botfather](https://t.me/botfather)输入指令/setdomain
-    3. 选择你的bot,然后输入http(s)://你的网站地址/login
-    4. Telegram Bot 名称是bot username 去掉@后的字符串
-13. 🎵 添加 [Suno API](https://github.com/Suno-API/Suno-API)接口支持,[对接文档](Suno.md)
-14. 🔄 支持Rerank模型,目前兼容Cohere和Jina,可接入Dify,[对接文档](Rerank.md)
-15. ⚡ **[OpenAI Realtime API](https://platform.openai.com/docs/guides/realtime/integration)** - 支持OpenAI的Realtime API,支持Azure渠道
-16. 支持使用路由/chat2link 进入聊天界面
-17. 🧠 支持通过模型名称后缀设置 reasoning effort:
+12. 🤖 支持Telegram授权登录
+13. 🎵 支持[Suno API](https://github.com/Suno-API/Suno-API)接口,[接口文档](https://docs.newapi.pro/api/suno-music)
+14. 🔄 支持Rerank模型(Cohere和Jina),[接口文档](https://docs.newapi.pro/api/jinaai-rerank)
+15. ⚡ 支持OpenAI Realtime API(包括Azure渠道),[接口文档](https://docs.newapi.pro/api/openai-realtime)
+16. ⚡ 支持Claude Messages 格式,[接口文档](https://docs.newapi.pro/api/anthropic-chat)
+17. 支持使用路由/chat2link进入聊天界面
+18. 🧠 支持通过模型名称后缀设置 reasoning effort:
     1. OpenAI o系列模型
         - 添加后缀 `-high` 设置为 high reasoning effort (例如: `o3-mini-high`)
         - 添加后缀 `-medium` 设置为 medium reasoning effort (例如: `o3-mini-medium`)
         - 添加后缀 `-low` 设置为 low reasoning effort (例如: `o3-mini-low`)
     2. Claude 思考模型
         - 添加后缀 `-thinking` 启用思考模式 (例如: `claude-3-7-sonnet-20250219-thinking`)
-18. 🔄 思考转内容,支持在 `渠道-编辑-渠道额外设置` 中设置 `thinking_to_content` 选项,默认`false`,开启后会将思考内容`reasoning_content`转换为`<think>`标签拼接到内容中返回。
-19. 🔄 模型限流,支持在 `系统设置-速率限制设置` 中设置模型限流,支持设置总请求数限制和成功请求数限制
+19. 🔄 思考转内容功能
+20. 🔄 模型限流功能
 21. 💰 缓存计费支持,开启后可以在缓存命中时按照设定的比例计费:
     1. 在 `系统设置-运营设置` 中设置 `提示缓存倍率` 选项
     2. 在渠道中设置 `提示缓存倍率`,范围 0-1,例如设置为 0.5 表示缓存命中时按照 50% 计费
@@ -81,155 +81,116 @@
         - [x] OpenAI
         - [x] Azure
         - [x] DeepSeek
-        - [ ] Claude
+        - [x] Claude
 
 ## 模型支持
-此版本额外支持以下模型:
+
+此版本支持多种模型,详情请参考[接口文档-中继接口](https://docs.newapi.pro/api):
+
 1. 第三方模型 **gpts** (gpt-4-gizmo-*)
-2. [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy)接口,[对接文档](Midjourney.md)
+2. [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy)接口,[接口文档](https://docs.newapi.pro/api/midjourney-proxy-image)
 3. 自定义渠道,支持填入完整调用地址
-4. [Suno API](https://github.com/Suno-API/Suno-API) 接口,[对接文档](Suno.md)
-5. Rerank模型,目前支持[Cohere](https://cohere.ai/)和[Jina](https://jina.ai/),[对接文档](Rerank.md)
-6. Dify
-
-您可以在渠道中添加自定义模型gpt-4-gizmo-*,此模型并非OpenAI官方模型,而是第三方模型,使用官方key无法调用。
-
-## 比原版One API多出的配置
-- `GENERATE_DEFAULT_TOKEN`:是否为新注册用户生成初始令牌,默认为 `false`。
-- `STREAMING_TIMEOUT`:设置流式一次回复的超时时间,默认为 60 秒。
-- `DIFY_DEBUG`:设置 Dify 渠道是否输出工作流和节点信息到客户端,默认为 `true`。
-- `FORCE_STREAM_OPTION`:是否覆盖客户端stream_options参数,请求上游返回流模式usage,默认为 `true`,建议开启,不影响客户端传入stream_options参数返回结果。
-- `GET_MEDIA_TOKEN`:是否统计图片token,默认为 `true`,关闭后将不再在本地计算图片token,可能会导致和上游计费不同,此项覆盖 `GET_MEDIA_TOKEN_NOT_STREAM` 选项作用。
-- `GET_MEDIA_TOKEN_NOT_STREAM`:是否在非流(`stream=false`)情况下统计图片token,默认为 `true`。
-- `UPDATE_TASK`:是否更新异步任务(Midjourney、Suno),默认为 `true`,关闭后将不会更新任务进度。
-- `COHERE_SAFETY_SETTING`:Cohere模型[安全设置](https://docs.cohere.com/docs/safety-modes#overview),可选值为 `NONE`, `CONTEXTUAL`, `STRICT`,默认为 `NONE`。
-- `GEMINI_VISION_MAX_IMAGE_NUM`:Gemini模型最大图片数量,默认为 `16`,设置为 `-1` 则不限制。
-- `MAX_FILE_DOWNLOAD_MB`: 最大文件下载大小,单位 MB,默认为 `20`。
-- `CRYPTO_SECRET`:加密密钥,用于加密数据库内容。
-- `AZURE_DEFAULT_API_VERSION`:Azure渠道默认API版本,如果渠道设置中未指定API版本,则使用此版本,默认为 `2024-12-01-preview`
-- `NOTIFICATION_LIMIT_DURATION_MINUTE`:通知限制的持续时间(分钟),默认为 `10`。
-- `NOTIFY_LIMIT_COUNT`:用户通知在指定持续时间内的最大数量,默认为 `2`。
-
-## 已废弃的环境变量
-- ~~`GEMINI_MODEL_MAP`(已废弃)~~:改为到`设置-模型相关设置`中设置
-- ~~`GEMINI_SAFETY_SETTING`(已废弃)~~:改为到`设置-模型相关设置`中设置
+4. [Suno API](https://github.com/Suno-API/Suno-API)接口,[接口文档](https://docs.newapi.pro/api/suno-music)
+5. Rerank模型([Cohere](https://cohere.ai/)和[Jina](https://jina.ai/)),[接口文档](https://docs.newapi.pro/api/jinaai-rerank)
+6. Claude Messages 格式,[接口文档](https://docs.newapi.pro/api/anthropic-chat)
+7. Dify
+
+## 环境变量配置
+
+详细配置说明请参考[安装指南-环境变量配置](https://docs.newapi.pro/installation/environment-variables):
+
+- `GENERATE_DEFAULT_TOKEN`:是否为新注册用户生成初始令牌,默认为 `false`
+- `STREAMING_TIMEOUT`:流式回复超时时间,默认60秒
+- `DIFY_DEBUG`:Dify渠道是否输出工作流和节点信息,默认 `true`
+- `FORCE_STREAM_OPTION`:是否覆盖客户端stream_options参数,默认 `true`
+- `GET_MEDIA_TOKEN`:是否统计图片token,默认 `true`
+- `GET_MEDIA_TOKEN_NOT_STREAM`:非流情况下是否统计图片token,默认 `true`
+- `UPDATE_TASK`:是否更新异步任务(Midjourney、Suno),默认 `true`
+- `COHERE_SAFETY_SETTING`:Cohere模型安全设置,可选值为 `NONE`, `CONTEXTUAL`, `STRICT`,默认 `NONE`
+- `GEMINI_VISION_MAX_IMAGE_NUM`:Gemini模型最大图片数量,默认 `16`
+- `MAX_FILE_DOWNLOAD_MB`: 最大文件下载大小,单位MB,默认 `20`
+- `CRYPTO_SECRET`:加密密钥,用于加密数据库内容
+- `AZURE_DEFAULT_API_VERSION`:Azure渠道默认API版本,默认 `2024-12-01-preview`
+- `NOTIFICATION_LIMIT_DURATION_MINUTE`:通知限制持续时间,默认 `10`分钟
+- `NOTIFY_LIMIT_COUNT`:用户通知在指定持续时间内的最大数量,默认 `2`
 
 ## 部署
 
+详细部署指南请参考[安装指南-部署方式](https://docs.newapi.pro/installation):
+
 > [!TIP]
 > 最新版Docker镜像:`calciumion/new-api:latest`  
 > 默认账号root 密码123456
 
-### 多机部署
-- 必须设置环境变量 `SESSION_SECRET`,否则会导致多机部署时登录状态不一致
-- 如果公用Redis,必须设置 `CRYPTO_SECRET`,否则会导致多机部署时Redis内容无法获取
+### 多机部署注意事项
+- 必须设置环境变量 `SESSION_SECRET`,否则会导致多机部署时登录状态不一致
+- 如果公用Redis,必须设置 `CRYPTO_SECRET`,否则会导致多机部署时Redis内容无法获取
 
 ### 部署要求
-- 本地数据库(默认):SQLite(Docker 部署默认使用 SQLite,必须挂载 `/data` 目录到宿主机
-- 远程数据库:MySQL 版本 >= 5.7.8,PgSQL 版本 >= 9.6
+- 本地数据库(默认):SQLite(Docker部署必须挂载`/data`目录)
+- 远程数据库:MySQL版本 >= 5.7.8,PgSQL版本 >= 9.6
 
-### 使用宝塔面板Docker功能部署
-安装宝塔面板 (**9.2.0版本**及以上),前往 [宝塔面板](https://www.bt.cn/new/download.html) 官网,选择正式版的脚本下载安装  
-安装后登录宝塔面板,在菜单栏中点击 Docker ,首次进入会提示安装 Docker 服务,点击立即安装,按提示完成安装  
-安装完成后在应用商店中找到 **New-API** ,点击安装,配置基本选项 即可完成安装  
-[图文教程](BT.md)
+### 部署方式
 
-### 基于 Docker 进行部署
-
-> [!TIP]
-> 默认管理员账号root 密码123456
+#### 使用宝塔面板Docker功能部署
+安装宝塔面板(**9.2.0版本**及以上),在应用商店中找到**New-API**安装即可。
+[图文教程](BT.md)
 
-### 使用 Docker Compose 部署(推荐)
+#### 使用Docker Compose部署(推荐)
 ```shell
 # 下载项目
 git clone https://github.com/Calcium-Ion/new-api.git
 cd new-api
-# 按需编辑 docker-compose.yml
-# nano docker-compose.yml
-# vim docker-compose.yml
+# 按需编辑docker-compose.yml
 # 启动
 docker-compose up -d
 ```
 
-#### 更新版本
-```shell
-docker-compose pull
-docker-compose up -d
-```
-
-### 直接使用 Docker 镜像
+#### 直接使用Docker镜像
 ```shell
-# 使用 SQLite 的部署命令:
+# 使用SQLite
 docker run --name new-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/new-api:/data calciumion/new-api:latest
 
-# 使用 MySQL 的部署命令,在上面的基础上添加 `-e SQL_DSN="root:123456@tcp(localhost:3306)/oneapi"`,请自行修改数据库连接参数。
-# 例如:
+# 使用MySQL
 docker run --name new-api -d --restart always -p 3000:3000 -e SQL_DSN="root:123456@tcp(localhost:3306)/oneapi" -e TZ=Asia/Shanghai -v /home/ubuntu/data/new-api:/data calciumion/new-api:latest
 ```
 
-#### 更新版本
-```shell
-# 拉取最新镜像
-docker pull calciumion/new-api:latest
-# 停止并删除旧容器
-docker stop new-api
-docker rm new-api
-# 使用相同参数运行新容器
-docker run --name new-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/new-api:/data calciumion/new-api:latest
-```
+## 渠道重试与缓存
+渠道重试功能已经实现,可以在`设置->运营设置->通用设置`设置重试次数,**建议开启缓存**功能。
 
-或者使用 Watchtower 自动更新(不推荐,可能会导致数据库不兼容):
-```shell
-docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR
-```
-
-## 渠道重试
-渠道重试功能已经实现,可以在`设置->运营设置->通用设置`设置重试次数,**建议开启缓存**功能。  
-如果开启了重试功能,重试使用下一个优先级,以此类推。
 ### 缓存设置方法
-1. `REDIS_CONN_STRING`:设置之后将使用 Redis 作为缓存使用。
-    + 例子:`REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
-2. `MEMORY_CACHE_ENABLED`:启用内存缓存(如果设置了`REDIS_CONN_STRING`,则无需手动设置),会导致用户额度的更新存在一定的延迟,可选值为 `true` 和 `false`,未设置则默认为 `false`。
-    + 例子:`MEMORY_CACHE_ENABLED=true`
-### 为什么有的时候没有重试
-这些错误码不会重试:400,504,524
-### 我想让400也重试
-在`渠道->编辑`中,将`状态码复写`改为
-```json
-{
-  "400": "500"
-}
-```
-可以实现400错误转为500错误,从而重试
+1. `REDIS_CONN_STRING`:设置Redis作为缓存
+2. `MEMORY_CACHE_ENABLED`:启用内存缓存(设置了Redis则无需手动设置)
 
-## Midjourney接口设置文档
-[对接文档](Midjourney.md)
+## 接口文档
 
-## Suno接口设置文档
-[对接文档](Suno.md)
+详细接口文档请参考[接口文档](https://docs.newapi.pro/api):
 
-## 界面截图
-![image](https://github.com/user-attachments/assets/a0dcd349-5df8-4dc8-9acf-ca272b239919)
-
-
-![image](https://github.com/user-attachments/assets/c7d0f7e1-729c-43e2-ac7c-2cb73b0afc8e)
-
-![image](https://github.com/user-attachments/assets/29f81de5-33fc-4fc5-a5ff-f9b54b653c7c)
-
-![image](https://github.com/user-attachments/assets/4fa53e18-d2c5-477a-9b26-b86e44c71e35)
-
-## 交流群
-<img src="https://github.com/user-attachments/assets/9ca0bc82-e057-4230-a28d-9f198fa022e3" width="200">
+- [聊天接口(Chat)](https://docs.newapi.pro/api/openai-chat)
+- [图像接口(Image)](https://docs.newapi.pro/api/openai-image)
+- [Midjourney接口](https://docs.newapi.pro/api/midjourney-proxy-image)
+- [音乐接口(Music)](https://docs.newapi.pro/api/relay/music)
+- [Suno接口](https://docs.newapi.pro/api/suno-music)
+- [重排序接口(Rerank)](https://docs.newapi.pro/api/jinaai-rerank)
+- [实时对话接口(Realtime)](https://docs.newapi.pro/api/openai-realtime)
+- [Claude聊天接口(messages)](https://docs.newapi.pro/api/anthropic-chat)
 
 ## 相关项目
 - [One API](https://github.com/songquanpeng/one-api):原版项目
 - [Midjourney-Proxy](https://github.com/novicezk/midjourney-proxy):Midjourney接口支持
-- [chatnio](https://github.com/Deeptrain-Community/chatnio):下一代 AI 一站式 B/C 端解决方案
+- [chatnio](https://github.com/Deeptrain-Community/chatnio):下一代AI一站式B/C端解决方案
 - [neko-api-key-tool](https://github.com/Calcium-Ion/neko-api-key-tool):用key查询使用额度
 
 其他基于New API的项目:
-- [new-api-horizon](https://github.com/Calcium-Ion/new-api-horizon):New API高性能优化版,专注于高并发优化,并支持Claude格式
-- [VoAPI](https://github.com/VoAPI/VoAPI):基于New API的前端美化版本,闭源免费
+- [new-api-horizon](https://github.com/Calcium-Ion/new-api-horizon):New API高性能优化版
+- [VoAPI](https://github.com/VoAPI/VoAPI):基于New API的前端美化版本
+
+## 帮助支持
+
+如有问题,请参考[帮助支持](https://docs.newapi.pro/support):
+- [社区交流](https://docs.newapi.pro/support/community-interaction)
+- [反馈问题](https://docs.newapi.pro/support/feedback-issues)
+- [常见问题](https://docs.newapi.pro/support/faq)
 
 ## 🌟 Star History
 

+ 2 - 0
common/constants.go

@@ -234,6 +234,7 @@ const (
 	ChannelTypeMokaAI         = 44
 	ChannelTypeVolcEngine     = 45
 	ChannelTypeBaiduV2        = 46
+	ChannelTypeXinference     = 47
 	ChannelTypeDummy          // this one is only for count, do not add any channel after this
 
 )
@@ -286,4 +287,5 @@ var ChannelBaseURLs = []string{
 	"https://api.moka.ai",                       //44
 	"https://ark.cn-beijing.volces.com",         //45
 	"https://qianfan.baidubce.com",              //46
+	"",                                          //47
 }

+ 1 - 1
common/custom-event.go

@@ -44,7 +44,7 @@ var fieldReplacer = strings.NewReplacer(
 	"\r", "\\r")
 
 var dataReplacer = strings.NewReplacer(
-	"\n", "\ndata:",
+	"\n", "\n",
 	"\r", "\\r")
 
 type CustomEvent struct {

+ 1 - 1
controller/channel-test.go

@@ -107,7 +107,7 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
 
 	adaptor.Init(info)
 
-	convertedRequest, err := adaptor.ConvertRequest(c, info, request)
+	convertedRequest, err := adaptor.ConvertOpenAIRequest(c, info, request)
 	if err != nil {
 		return err, nil
 	}

+ 51 - 0
controller/relay.go

@@ -148,6 +148,50 @@ func WssRelay(c *gin.Context) {
 	}
 }
 
+func RelayClaude(c *gin.Context) {
+	//relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+	requestId := c.GetString(common.RequestIdKey)
+	group := c.GetString("group")
+	originalModel := c.GetString("original_model")
+	var claudeErr *dto.ClaudeErrorWithStatusCode
+
+	for i := 0; i <= common.RetryTimes; i++ {
+		channel, err := getChannel(c, group, originalModel, i)
+		if err != nil {
+			common.LogError(c, err.Error())
+			claudeErr = service.ClaudeErrorWrapperLocal(err, "get_channel_failed", http.StatusInternalServerError)
+			break
+		}
+
+		claudeErr = claudeRequest(c, channel)
+
+		if claudeErr == nil {
+			return // 成功处理请求,直接返回
+		}
+
+		openaiErr := service.ClaudeErrorToOpenAIError(claudeErr)
+
+		go processChannelError(c, channel.Id, channel.Type, channel.Name, channel.GetAutoBan(), openaiErr)
+
+		if !shouldRetry(c, openaiErr, common.RetryTimes-i) {
+			break
+		}
+	}
+	useChannel := c.GetStringSlice("use_channel")
+	if len(useChannel) > 1 {
+		retryLogStr := fmt.Sprintf("重试:%s", strings.Trim(strings.Join(strings.Fields(fmt.Sprint(useChannel)), "->"), "[]"))
+		common.LogInfo(c, retryLogStr)
+	}
+
+	if claudeErr != nil {
+		claudeErr.Error.Message = common.MessageWithRequestId(claudeErr.Error.Message, requestId)
+		c.JSON(claudeErr.StatusCode, gin.H{
+			"type":  "error",
+			"error": claudeErr.Error,
+		})
+	}
+}
+
 func relayRequest(c *gin.Context, relayMode int, channel *model.Channel) *dto.OpenAIErrorWithStatusCode {
 	addUsedChannel(c, channel.Id)
 	requestBody, _ := common.GetRequestBody(c)
@@ -162,6 +206,13 @@ func wssRequest(c *gin.Context, ws *websocket.Conn, relayMode int, channel *mode
 	return relay.WssHelper(c, ws)
 }
 
+func claudeRequest(c *gin.Context, channel *model.Channel) *dto.ClaudeErrorWithStatusCode {
+	addUsedChannel(c, channel.Id)
+	requestBody, _ := common.GetRequestBody(c)
+	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
+	return relay.ClaudeHelper(c)
+}
+
 func addUsedChannel(c *gin.Context, channelId int) {
 	useChannel := c.GetStringSlice("use_channel")
 	useChannel = append(useChannel, fmt.Sprintf("%d", channelId))

+ 212 - 0
dto/claude.go

@@ -0,0 +1,212 @@
+package dto
+
+import "encoding/json"
+
+type ClaudeMetadata struct {
+	UserId string `json:"user_id"`
+}
+
+type ClaudeMediaMessage struct {
+	Type        string               `json:"type"`
+	Text        *string              `json:"text,omitempty"`
+	Model       string               `json:"model,omitempty"`
+	Source      *ClaudeMessageSource `json:"source,omitempty"`
+	Usage       *ClaudeUsage         `json:"usage,omitempty"`
+	StopReason  *string              `json:"stop_reason,omitempty"`
+	PartialJson *string              `json:"partial_json,omitempty"`
+	Role        string               `json:"role,omitempty"`
+	Thinking    string               `json:"thinking,omitempty"`
+	Signature   string               `json:"signature,omitempty"`
+	Delta       string               `json:"delta,omitempty"`
+	// tool_calls
+	Id        string          `json:"id,omitempty"`
+	Name      string          `json:"name,omitempty"`
+	Input     any             `json:"input,omitempty"`
+	Content   json.RawMessage `json:"content,omitempty"`
+	ToolUseId string          `json:"tool_use_id,omitempty"`
+}
+
+func (c *ClaudeMediaMessage) SetText(s string) {
+	c.Text = &s
+}
+
+func (c *ClaudeMediaMessage) GetText() string {
+	if c.Text == nil {
+		return ""
+	}
+	return *c.Text
+}
+
+func (c *ClaudeMediaMessage) IsStringContent() bool {
+	var content string
+	return json.Unmarshal(c.Content, &content) == nil
+}
+
+func (c *ClaudeMediaMessage) GetStringContent() string {
+	var content string
+	if err := json.Unmarshal(c.Content, &content); err == nil {
+		return content
+	}
+	return ""
+}
+
+func (c *ClaudeMediaMessage) SetContent(content any) {
+	jsonContent, _ := json.Marshal(content)
+	c.Content = jsonContent
+}
+
+func (c *ClaudeMediaMessage) ParseMediaContent() []ClaudeMediaMessage {
+	var mediaContent []ClaudeMediaMessage
+	if err := json.Unmarshal(c.Content, &mediaContent); err == nil {
+		return mediaContent
+	}
+	return make([]ClaudeMediaMessage, 0)
+}
+
+type ClaudeMessageSource struct {
+	Type      string `json:"type"`
+	MediaType string `json:"media_type"`
+	Data      any    `json:"data"`
+}
+
+type ClaudeMessage struct {
+	Role    string `json:"role"`
+	Content any    `json:"content"`
+}
+
+func (c *ClaudeMessage) IsStringContent() bool {
+	_, ok := c.Content.(string)
+	return ok
+}
+
+func (c *ClaudeMessage) GetStringContent() string {
+	if c.IsStringContent() {
+		return c.Content.(string)
+	}
+	return ""
+}
+
+func (c *ClaudeMessage) SetStringContent(content string) {
+	c.Content = content
+}
+
+func (c *ClaudeMessage) ParseContent() ([]ClaudeMediaMessage, error) {
+	// map content to []ClaudeMediaMessage
+	// parse to json
+	jsonContent, _ := json.Marshal(c.Content)
+	var contentList []ClaudeMediaMessage
+	err := json.Unmarshal(jsonContent, &contentList)
+	if err != nil {
+		return make([]ClaudeMediaMessage, 0), err
+	}
+	return contentList, nil
+}
+
+type Tool struct {
+	Name        string                 `json:"name"`
+	Description string                 `json:"description,omitempty"`
+	InputSchema map[string]interface{} `json:"input_schema"`
+}
+
+type InputSchema struct {
+	Type       string `json:"type"`
+	Properties any    `json:"properties,omitempty"`
+	Required   any    `json:"required,omitempty"`
+}
+
+type ClaudeRequest struct {
+	Model             string          `json:"model"`
+	Prompt            string          `json:"prompt,omitempty"`
+	System            any             `json:"system,omitempty"`
+	Messages          []ClaudeMessage `json:"messages,omitempty"`
+	MaxTokens         uint            `json:"max_tokens,omitempty"`
+	MaxTokensToSample uint            `json:"max_tokens_to_sample,omitempty"`
+	StopSequences     []string        `json:"stop_sequences,omitempty"`
+	Temperature       *float64        `json:"temperature,omitempty"`
+	TopP              float64         `json:"top_p,omitempty"`
+	TopK              int             `json:"top_k,omitempty"`
+	//ClaudeMetadata    `json:"metadata,omitempty"`
+	Stream     bool      `json:"stream,omitempty"`
+	Tools      any       `json:"tools,omitempty"`
+	ToolChoice any       `json:"tool_choice,omitempty"`
+	Thinking   *Thinking `json:"thinking,omitempty"`
+}
+
+type Thinking struct {
+	Type         string `json:"type"`
+	BudgetTokens int    `json:"budget_tokens"`
+}
+
+func (c *ClaudeRequest) IsStringSystem() bool {
+	_, ok := c.System.(string)
+	return ok
+}
+
+func (c *ClaudeRequest) GetStringSystem() string {
+	if c.IsStringSystem() {
+		return c.System.(string)
+	}
+	return ""
+}
+
+func (c *ClaudeRequest) SetStringSystem(system string) {
+	c.System = system
+}
+
+func (c *ClaudeRequest) ParseSystem() []ClaudeMediaMessage {
+	// map content to []ClaudeMediaMessage
+	// parse to json
+	jsonContent, _ := json.Marshal(c.System)
+	var contentList []ClaudeMediaMessage
+	if err := json.Unmarshal(jsonContent, &contentList); err == nil {
+		return contentList
+	}
+	return make([]ClaudeMediaMessage, 0)
+}
+
+type ClaudeError struct {
+	Type    string `json:"type"`
+	Message string `json:"message"`
+}
+
+type ClaudeErrorWithStatusCode struct {
+	Error      ClaudeError `json:"error"`
+	StatusCode int         `json:"status_code"`
+	LocalError bool
+}
+
+type ClaudeResponse struct {
+	Id           string               `json:"id,omitempty"`
+	Type         string               `json:"type"`
+	Role         string               `json:"role,omitempty"`
+	Content      []ClaudeMediaMessage `json:"content,omitempty"`
+	Completion   string               `json:"completion,omitempty"`
+	StopReason   string               `json:"stop_reason,omitempty"`
+	Model        string               `json:"model,omitempty"`
+	Error        ClaudeError          `json:"error,omitempty"`
+	Usage        *ClaudeUsage         `json:"usage,omitempty"`
+	Index        *int                 `json:"index,omitempty"`
+	ContentBlock *ClaudeMediaMessage  `json:"content_block,omitempty"`
+	Delta        *ClaudeMediaMessage  `json:"delta,omitempty"`
+	Message      *ClaudeMediaMessage  `json:"message,omitempty"`
+}
+
+// set index
+func (c *ClaudeResponse) SetIndex(i int) {
+	c.Index = &i
+}
+
+// get index
+func (c *ClaudeResponse) GetIndex() int {
+	if c.Index == nil {
+		return 0
+	}
+	return *c.Index
+}
+
+type ClaudeUsage struct {
+	InputTokens              int `json:"input_tokens"`
+	CacheCreationInputTokens int `json:"cache_creation_input_tokens"`
+	CacheReadInputTokens     int `json:"cache_read_input_tokens"`
+	OutputTokens             int `json:"output_tokens"`
+}

+ 14 - 11
dto/openai_response.go

@@ -1,16 +1,5 @@
 package dto
 
-type TextResponseWithError struct {
-	Id      string                        `json:"id"`
-	Object  string                        `json:"object"`
-	Created int64                         `json:"created"`
-	Choices []OpenAITextResponseChoice    `json:"choices"`
-	Data    []OpenAIEmbeddingResponseItem `json:"data"`
-	Model   string                        `json:"model"`
-	Usage   `json:"usage"`
-	Error   OpenAIError `json:"error"`
-}
-
 type SimpleResponse struct {
 	Usage   `json:"usage"`
 	Error   OpenAIError                `json:"error"`
@@ -125,6 +114,20 @@ type ChatCompletionsStreamResponse struct {
 	Usage             *Usage                                `json:"usage"`
 }
 
+func (c *ChatCompletionsStreamResponse) IsToolCall() bool {
+	if len(c.Choices) == 0 {
+		return false
+	}
+	return len(c.Choices[0].Delta.ToolCalls) > 0
+}
+
+func (c *ChatCompletionsStreamResponse) GetFirstToolCall() *ToolCallResponse {
+	if c.IsToolCall() {
+		return &c.Choices[0].Delta.ToolCalls[0]
+	}
+	return nil
+}
+
 func (c *ChatCompletionsStreamResponse) Copy() *ChatCompletionsStreamResponse {
 	choices := make([]ChatCompletionsStreamResponseChoice, len(c.Choices))
 	copy(choices, c.Choices)

+ 5 - 4
dto/realtime.go

@@ -44,10 +44,11 @@ type RealtimeUsage struct {
 }
 
 type InputTokenDetails struct {
-	CachedTokens int `json:"cached_tokens"`
-	TextTokens   int `json:"text_tokens"`
-	AudioTokens  int `json:"audio_tokens"`
-	ImageTokens  int `json:"image_tokens"`
+	CachedTokens         int `json:"cached_tokens"`
+	CachedCreationTokens int
+	TextTokens           int `json:"text_tokens"`
+	AudioTokens          int `json:"audio_tokens"`
+	ImageTokens          int `json:"image_tokens"`
 }
 
 type OutputTokenDetails struct {

+ 1 - 1
go.mod

@@ -11,6 +11,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.11
 	github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.7.4
 	github.com/bytedance/gopkg v0.0.0-20220118071334-3db87571198b
+	github.com/bytedance/sonic v1.11.6
 	github.com/gin-contrib/cors v1.7.2
 	github.com/gin-contrib/gzip v0.0.6
 	github.com/gin-contrib/sessions v0.0.5
@@ -42,7 +43,6 @@ require (
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
 	github.com/aws/smithy-go v1.20.2 // indirect
-	github.com/bytedance/sonic v1.11.6 // indirect
 	github.com/bytedance/sonic/loader v0.1.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cloudwego/base64x v0.1.4 // indirect

+ 8 - 0
middleware/auth.go

@@ -174,6 +174,14 @@ func TokenAuth() func(c *gin.Context) {
 			}
 			c.Request.Header.Set("Authorization", "Bearer "+key)
 		}
+		// 检查path包含/v1/messages
+		if strings.Contains(c.Request.URL.Path, "/v1/messages") {
+			// 从x-api-key中获取key
+			key := c.Request.Header.Get("x-api-key")
+			if key != "" {
+				c.Request.Header.Set("Authorization", "Bearer "+key)
+			}
+		}
 		key := c.Request.Header.Get("Authorization")
 		parts := make([]string, 0)
 		key = strings.TrimPrefix(key, "Bearer ")

+ 6 - 5
model/main.go

@@ -1,16 +1,17 @@
 package model
 
 import (
-	"github.com/glebarez/sqlite"
-	"gorm.io/driver/mysql"
-	"gorm.io/driver/postgres"
-	"gorm.io/gorm"
 	"log"
 	"one-api/common"
 	"os"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/glebarez/sqlite"
+	"gorm.io/driver/mysql"
+	"gorm.io/driver/postgres"
+	"gorm.io/gorm"
 )
 
 var groupCol string
@@ -60,7 +61,7 @@ func chooseDB(envName string) (*gorm.DB, error) {
 	}()
 	dsn := os.Getenv(envName)
 	if dsn != "" {
-		if strings.HasPrefix(dsn, "postgres://") {
+		if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
 			// Use PostgreSQL
 			common.SysLog("using PostgreSQL as database")
 			common.UsingPostgreSQL = true

+ 2 - 1
relay/channel/adapter.go

@@ -13,7 +13,7 @@ type Adaptor interface {
 	Init(info *relaycommon.RelayInfo)
 	GetRequestURL(info *relaycommon.RelayInfo) (string, error)
 	SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error
-	ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error)
+	ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error)
 	ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error)
 	ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error)
 	ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error)
@@ -22,6 +22,7 @@ type Adaptor interface {
 	DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode)
 	GetModelList() []string
 	GetChannelName() string
+	ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error)
 }
 
 type TaskAdaptor interface {

+ 8 - 2
relay/channel/ali/adaptor.go

@@ -16,6 +16,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 }
 
@@ -44,7 +50,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -87,7 +93,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		if info.IsStream {
 			err, usage = openai.OaiStreamHandler(c, resp, info)
 		} else {
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	}
 	return

+ 5 - 7
relay/channel/ali/image.go

@@ -26,7 +26,7 @@ func oaiImage2Ali(request dto.ImageRequest) *AliImageRequest {
 	return &imageRequest
 }
 
-func updateTask(info *relaycommon.RelayInfo, taskID string, key string) (*AliResponse, error, []byte) {
+func updateTask(info *relaycommon.RelayInfo, taskID string) (*AliResponse, error, []byte) {
 	url := fmt.Sprintf("%s/api/v1/tasks/%s", info.BaseUrl, taskID)
 
 	var aliResponse AliResponse
@@ -36,7 +36,7 @@ func updateTask(info *relaycommon.RelayInfo, taskID string, key string) (*AliRes
 		return &aliResponse, err, nil
 	}
 
-	req.Header.Set("Authorization", "Bearer "+key)
+	req.Header.Set("Authorization", "Bearer "+info.ApiKey)
 
 	client := &http.Client{}
 	resp, err := client.Do(req)
@@ -58,7 +58,7 @@ func updateTask(info *relaycommon.RelayInfo, taskID string, key string) (*AliRes
 	return &response, nil, responseBody
 }
 
-func asyncTaskWait(info *relaycommon.RelayInfo, taskID string, key string) (*AliResponse, []byte, error) {
+func asyncTaskWait(info *relaycommon.RelayInfo, taskID string) (*AliResponse, []byte, error) {
 	waitSeconds := 3
 	step := 0
 	maxStep := 20
@@ -68,7 +68,7 @@ func asyncTaskWait(info *relaycommon.RelayInfo, taskID string, key string) (*Ali
 
 	for {
 		step++
-		rsp, err, body := updateTask(info, taskID, key)
+		rsp, err, body := updateTask(info, taskID)
 		responseBody = body
 		if err != nil {
 			return &taskResponse, responseBody, err
@@ -125,8 +125,6 @@ func responseAli2OpenAIImage(c *gin.Context, response *AliResponse, info *relayc
 }
 
 func aliImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	apiKey := c.Request.Header.Get("Authorization")
-	apiKey = strings.TrimPrefix(apiKey, "Bearer ")
 	responseFormat := c.GetString("response_format")
 
 	var aliTaskResponse AliResponse
@@ -148,7 +146,7 @@ func aliImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
 		return service.OpenAIErrorWrapper(errors.New(aliTaskResponse.Message), "ali_async_task_failed", http.StatusInternalServerError), nil
 	}
 
-	aliResponse, _, err := asyncTaskWait(info, aliTaskResponse.Output.TaskId, apiKey)
+	aliResponse, _, err := asyncTaskWait(info, aliTaskResponse.Output.TaskId)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "ali_async_task_wait_failed", http.StatusInternalServerError), nil
 	}

+ 4 - 0
relay/channel/api_request.go

@@ -7,6 +7,7 @@ import (
 	"github.com/gorilla/websocket"
 	"io"
 	"net/http"
+	common2 "one-api/common"
 	"one-api/relay/common"
 	"one-api/relay/constant"
 	"one-api/service"
@@ -31,6 +32,9 @@ func DoApiRequest(a Adaptor, c *gin.Context, info *common.RelayInfo, requestBody
 	if err != nil {
 		return nil, fmt.Errorf("get request url failed: %w", err)
 	}
+	if common2.DebugEnabled {
+		println("fullRequestURL:", fullRequestURL)
+	}
 	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
 	if err != nil {
 		return nil, fmt.Errorf("new request failed: %w", err)

+ 6 - 2
relay/channel/aws/adaptor.go

@@ -20,6 +20,10 @@ type Adaptor struct {
 	RequestMode int
 }
 
+func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
+	return request, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -43,12 +47,12 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
 
-	var claudeReq *claude.ClaudeRequest
+	var claudeReq *dto.ClaudeRequest
 	var err error
 	claudeReq, err = claude.RequestOpenAI2ClaudeMessage(*request)
 	if err != nil {

+ 13 - 13
relay/channel/aws/dto.go

@@ -1,25 +1,25 @@
 package aws
 
 import (
-	"one-api/relay/channel/claude"
+	"one-api/dto"
 )
 
 type AwsClaudeRequest struct {
 	// AnthropicVersion should be "bedrock-2023-05-31"
-	AnthropicVersion string                 `json:"anthropic_version"`
-	System           string                 `json:"system,omitempty"`
-	Messages         []claude.ClaudeMessage `json:"messages"`
-	MaxTokens        uint                   `json:"max_tokens,omitempty"`
-	Temperature      *float64               `json:"temperature,omitempty"`
-	TopP             float64                `json:"top_p,omitempty"`
-	TopK             int                    `json:"top_k,omitempty"`
-	StopSequences    []string               `json:"stop_sequences,omitempty"`
-	Tools            any                    `json:"tools,omitempty"`
-	ToolChoice       any                    `json:"tool_choice,omitempty"`
-	Thinking         *claude.Thinking       `json:"thinking,omitempty"`
+	AnthropicVersion string              `json:"anthropic_version"`
+	System           any                 `json:"system,omitempty"`
+	Messages         []dto.ClaudeMessage `json:"messages"`
+	MaxTokens        uint                `json:"max_tokens,omitempty"`
+	Temperature      *float64            `json:"temperature,omitempty"`
+	TopP             float64             `json:"top_p,omitempty"`
+	TopK             int                 `json:"top_k,omitempty"`
+	StopSequences    []string            `json:"stop_sequences,omitempty"`
+	Tools            any                 `json:"tools,omitempty"`
+	ToolChoice       any                 `json:"tool_choice,omitempty"`
+	Thinking         *dto.Thinking       `json:"thinking,omitempty"`
 }
 
-func copyRequest(req *claude.ClaudeRequest) *AwsClaudeRequest {
+func copyRequest(req *dto.ClaudeRequest) *AwsClaudeRequest {
 	return &AwsClaudeRequest{
 		AnthropicVersion: "bedrock-2023-05-31",
 		System:           req.System,

+ 31 - 34
relay/channel/aws/relay-aws.go

@@ -9,7 +9,7 @@ import (
 	"io"
 	"net/http"
 	"one-api/common"
-	relaymodel "one-api/dto"
+	"one-api/dto"
 	"one-api/relay/channel/claude"
 	relaycommon "one-api/relay/common"
 	"one-api/relay/helper"
@@ -39,10 +39,10 @@ func newAwsClient(c *gin.Context, info *relaycommon.RelayInfo) (*bedrockruntime.
 	return client, nil
 }
 
-func wrapErr(err error) *relaymodel.OpenAIErrorWithStatusCode {
-	return &relaymodel.OpenAIErrorWithStatusCode{
+func wrapErr(err error) *dto.OpenAIErrorWithStatusCode {
+	return &dto.OpenAIErrorWithStatusCode{
 		StatusCode: http.StatusInternalServerError,
-		Error: relaymodel.OpenAIError{
+		Error: dto.OpenAIError{
 			Message: fmt.Sprintf("%s", err.Error()),
 		},
 	}
@@ -56,7 +56,7 @@ func awsModelID(requestModel string) (string, error) {
 	return requestModel, nil
 }
 
-func awsHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode int) (*relaymodel.OpenAIErrorWithStatusCode, *relaymodel.Usage) {
+func awsHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode int) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	awsCli, err := newAwsClient(c, info)
 	if err != nil {
 		return wrapErr(errors.Wrap(err, "newAwsClient")), nil
@@ -77,7 +77,7 @@ func awsHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode int) (*
 	if !ok {
 		return wrapErr(errors.New("request not found")), nil
 	}
-	claudeReq := claudeReq_.(*claude.ClaudeRequest)
+	claudeReq := claudeReq_.(*dto.ClaudeRequest)
 	awsClaudeReq := copyRequest(claudeReq)
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
 	if err != nil {
@@ -89,14 +89,14 @@ func awsHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode int) (*
 		return wrapErr(errors.Wrap(err, "InvokeModel")), nil
 	}
 
-	claudeResponse := new(claude.ClaudeResponse)
+	claudeResponse := new(dto.ClaudeResponse)
 	err = json.Unmarshal(awsResp.Body, claudeResponse)
 	if err != nil {
 		return wrapErr(errors.Wrap(err, "unmarshal response")), nil
 	}
 
 	openaiResp := claude.ResponseClaude2OpenAI(requestMode, claudeResponse)
-	usage := relaymodel.Usage{
+	usage := dto.Usage{
 		PromptTokens:     claudeResponse.Usage.InputTokens,
 		CompletionTokens: claudeResponse.Usage.OutputTokens,
 		TotalTokens:      claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens,
@@ -107,7 +107,7 @@ func awsHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode int) (*
 	return nil, &usage
 }
 
-func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, requestMode int) (*relaymodel.OpenAIErrorWithStatusCode, *relaymodel.Usage) {
+func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, requestMode int) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	awsCli, err := newAwsClient(c, info)
 	if err != nil {
 		return wrapErr(errors.Wrap(err, "newAwsClient")), nil
@@ -128,7 +128,7 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 	if !ok {
 		return wrapErr(errors.New("request not found")), nil
 	}
-	claudeReq := claudeReq_.(*claude.ClaudeRequest)
+	claudeReq := claudeReq_.(*dto.ClaudeRequest)
 
 	awsClaudeReq := copyRequest(claudeReq)
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
@@ -144,11 +144,14 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 	defer stream.Close()
 
 	c.Writer.Header().Set("Content-Type", "text/event-stream")
-	var usage relaymodel.Usage
-	var id string
-	var model string
+	claudeInfo := &claude.ClaudeResponseInfo{
+		ResponseId:   fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
+		Created:      common.GetTimestamp(),
+		Model:        info.UpstreamModelName,
+		ResponseText: strings.Builder{},
+		Usage:        &dto.Usage{},
+	}
 	isFirst := true
-	createdTime := common.GetTimestamp()
 	c.Stream(func(w io.Writer) bool {
 		event, ok := <-stream.Events()
 		if !ok {
@@ -161,33 +164,19 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 				isFirst = false
 				info.FirstResponseTime = time.Now()
 			}
-			claudeResp := new(claude.ClaudeResponse)
-			err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(claudeResp)
+			claudeResponse := new(dto.ClaudeResponse)
+			err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(claudeResponse)
 			if err != nil {
 				common.SysError("error unmarshalling stream response: " + err.Error())
 				return false
 			}
 
-			response, claudeUsage := claude.StreamResponseClaude2OpenAI(requestMode, claudeResp)
-			if claudeUsage != nil {
-				usage.PromptTokens += claudeUsage.InputTokens
-				usage.CompletionTokens += claudeUsage.OutputTokens
-			}
+			response := claude.StreamResponseClaude2OpenAI(requestMode, claudeResponse)
 
-			if response == nil {
+			if !claude.FormatClaudeResponseInfo(RequestModeMessage, claudeResponse, response, claudeInfo) {
 				return true
 			}
 
-			if response.Id != "" {
-				id = response.Id
-			}
-			if response.Model != "" {
-				model = response.Model
-			}
-			response.Created = createdTime
-			response.Id = id
-			response.Model = model
-
 			jsonStr, err := json.Marshal(response)
 			if err != nil {
 				common.SysError("error marshalling stream response: " + err.Error())
@@ -203,8 +192,16 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 			return false
 		}
 	})
+
+	if claudeInfo.Usage.PromptTokens == 0 {
+		//上游出错
+	}
+	if claudeInfo.Usage.CompletionTokens == 0 {
+		claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
+	}
+
 	if info.ShouldIncludeUsage {
-		response := helper.GenerateFinalUsageResponse(id, createdTime, info.UpstreamModelName, usage)
+		response := helper.GenerateFinalUsageResponse(claudeInfo.ResponseId, claudeInfo.Created, info.UpstreamModelName, *claudeInfo.Usage)
 		err := helper.ObjectData(c, response)
 		if err != nil {
 			common.SysError("send final response failed: " + err.Error())
@@ -217,5 +214,5 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 			return service.OpenAIErrorWrapperLocal(err, "close_response_body_failed", http.StatusInternalServerError), nil
 		}
 	}
-	return nil, &usage
+	return nil, claudeInfo.Usage
 }

+ 7 - 1
relay/channel/baidu/adaptor.go

@@ -16,6 +16,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -104,7 +110,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}

+ 8 - 2
relay/channel/baidu_v2/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -38,7 +44,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -62,7 +68,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = openai.OaiStreamHandler(c, resp, info)
 	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 5 - 1
relay/channel/claude/adaptor.go

@@ -22,6 +22,10 @@ type Adaptor struct {
 	RequestMode int
 }
 
+func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
+	return request, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -60,7 +64,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}

+ 93 - 92
relay/channel/claude/dto.go

@@ -1,94 +1,95 @@
 package claude
 
-type ClaudeMetadata struct {
-	UserId string `json:"user_id"`
-}
-
-type ClaudeMediaMessage struct {
-	Type        string               `json:"type"`
-	Text        string               `json:"text,omitempty"`
-	Source      *ClaudeMessageSource `json:"source,omitempty"`
-	Usage       *ClaudeUsage         `json:"usage,omitempty"`
-	StopReason  *string              `json:"stop_reason,omitempty"`
-	PartialJson string               `json:"partial_json,omitempty"`
-	Thinking    string               `json:"thinking,omitempty"`
-	Signature   string               `json:"signature,omitempty"`
-	Delta       string               `json:"delta,omitempty"`
-	// tool_calls
-	Id        string `json:"id,omitempty"`
-	Name      string `json:"name,omitempty"`
-	Input     any    `json:"input,omitempty"`
-	Content   string `json:"content,omitempty"`
-	ToolUseId string `json:"tool_use_id,omitempty"`
-}
-
-type ClaudeMessageSource struct {
-	Type      string `json:"type"`
-	MediaType string `json:"media_type"`
-	Data      string `json:"data"`
-}
-
-type ClaudeMessage struct {
-	Role    string `json:"role"`
-	Content any    `json:"content"`
-}
-
-type Tool struct {
-	Name        string                 `json:"name"`
-	Description string                 `json:"description,omitempty"`
-	InputSchema map[string]interface{} `json:"input_schema"`
-}
-
-type InputSchema struct {
-	Type       string `json:"type"`
-	Properties any    `json:"properties,omitempty"`
-	Required   any    `json:"required,omitempty"`
-}
-
-type ClaudeRequest struct {
-	Model             string          `json:"model"`
-	Prompt            string          `json:"prompt,omitempty"`
-	System            string          `json:"system,omitempty"`
-	Messages          []ClaudeMessage `json:"messages,omitempty"`
-	MaxTokens         uint            `json:"max_tokens,omitempty"`
-	MaxTokensToSample uint            `json:"max_tokens_to_sample,omitempty"`
-	StopSequences     []string        `json:"stop_sequences,omitempty"`
-	Temperature       *float64        `json:"temperature,omitempty"`
-	TopP              float64         `json:"top_p,omitempty"`
-	TopK              int             `json:"top_k,omitempty"`
-	//ClaudeMetadata    `json:"metadata,omitempty"`
-	Stream     bool      `json:"stream,omitempty"`
-	Tools      any       `json:"tools,omitempty"`
-	ToolChoice any       `json:"tool_choice,omitempty"`
-	Thinking   *Thinking `json:"thinking,omitempty"`
-}
-
-type Thinking struct {
-	Type         string `json:"type"`
-	BudgetTokens int    `json:"budget_tokens"`
-}
-
-type ClaudeError struct {
-	Type    string `json:"type"`
-	Message string `json:"message"`
-}
-
-type ClaudeResponse struct {
-	Id           string               `json:"id"`
-	Type         string               `json:"type"`
-	Content      []ClaudeMediaMessage `json:"content"`
-	Completion   string               `json:"completion"`
-	StopReason   string               `json:"stop_reason"`
-	Model        string               `json:"model"`
-	Error        ClaudeError          `json:"error"`
-	Usage        ClaudeUsage          `json:"usage"`
-	Index        int                  `json:"index"` // stream only
-	ContentBlock *ClaudeMediaMessage  `json:"content_block"`
-	Delta        *ClaudeMediaMessage  `json:"delta"`   // stream only
-	Message      *ClaudeResponse      `json:"message"` // stream only: message_start
-}
-
-type ClaudeUsage struct {
-	InputTokens  int `json:"input_tokens"`
-	OutputTokens int `json:"output_tokens"`
-}
+//
+//type ClaudeMetadata struct {
+//	UserId string `json:"user_id"`
+//}
+//
+//type ClaudeMediaMessage struct {
+//	Type        string               `json:"type"`
+//	Text        string               `json:"text,omitempty"`
+//	Source      *ClaudeMessageSource `json:"source,omitempty"`
+//	Usage       *ClaudeUsage         `json:"usage,omitempty"`
+//	StopReason  *string              `json:"stop_reason,omitempty"`
+//	PartialJson string               `json:"partial_json,omitempty"`
+//	Thinking    string               `json:"thinking,omitempty"`
+//	Signature   string               `json:"signature,omitempty"`
+//	Delta       string               `json:"delta,omitempty"`
+//	// tool_calls
+//	Id        string `json:"id,omitempty"`
+//	Name      string `json:"name,omitempty"`
+//	Input     any    `json:"input,omitempty"`
+//	Content   string `json:"content,omitempty"`
+//	ToolUseId string `json:"tool_use_id,omitempty"`
+//}
+//
+//type ClaudeMessageSource struct {
+//	Type      string `json:"type"`
+//	MediaType string `json:"media_type"`
+//	Data      string `json:"data"`
+//}
+//
+//type ClaudeMessage struct {
+//	Role    string `json:"role"`
+//	Content any    `json:"content"`
+//}
+//
+//type Tool struct {
+//	Name        string                 `json:"name"`
+//	Description string                 `json:"description,omitempty"`
+//	InputSchema map[string]interface{} `json:"input_schema"`
+//}
+//
+//type InputSchema struct {
+//	Type       string `json:"type"`
+//	Properties any    `json:"properties,omitempty"`
+//	Required   any    `json:"required,omitempty"`
+//}
+//
+//type ClaudeRequest struct {
+//	Model             string          `json:"model"`
+//	Prompt            string          `json:"prompt,omitempty"`
+//	System            string          `json:"system,omitempty"`
+//	Messages          []ClaudeMessage `json:"messages,omitempty"`
+//	MaxTokens         uint            `json:"max_tokens,omitempty"`
+//	MaxTokensToSample uint            `json:"max_tokens_to_sample,omitempty"`
+//	StopSequences     []string        `json:"stop_sequences,omitempty"`
+//	Temperature       *float64        `json:"temperature,omitempty"`
+//	TopP              float64         `json:"top_p,omitempty"`
+//	TopK              int             `json:"top_k,omitempty"`
+//	//ClaudeMetadata    `json:"metadata,omitempty"`
+//	Stream     bool      `json:"stream,omitempty"`
+//	Tools      any       `json:"tools,omitempty"`
+//	ToolChoice any       `json:"tool_choice,omitempty"`
+//	Thinking   *Thinking `json:"thinking,omitempty"`
+//}
+//
+//type Thinking struct {
+//	Type         string `json:"type"`
+//	BudgetTokens int    `json:"budget_tokens"`
+//}
+//
+//type ClaudeError struct {
+//	Type    string `json:"type"`
+//	Message string `json:"message"`
+//}
+//
+//type ClaudeResponse struct {
+//	Id           string               `json:"id"`
+//	Type         string               `json:"type"`
+//	Content      []ClaudeMediaMessage `json:"content"`
+//	Completion   string               `json:"completion"`
+//	StopReason   string               `json:"stop_reason"`
+//	Model        string               `json:"model"`
+//	Error        ClaudeError          `json:"error"`
+//	Usage        ClaudeUsage          `json:"usage"`
+//	Index        int                  `json:"index"` // stream only
+//	ContentBlock *ClaudeMediaMessage  `json:"content_block"`
+//	Delta        *ClaudeMediaMessage  `json:"delta"`   // stream only
+//	Message      *ClaudeResponse      `json:"message"` // stream only: message_start
+//}
+//
+//type ClaudeUsage struct {
+//	InputTokens  int `json:"input_tokens"`
+//	OutputTokens int `json:"output_tokens"`
+//}

+ 173 - 86
relay/channel/claude/relay-claude.go

@@ -1,6 +1,7 @@
 package claude
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -29,9 +30,9 @@ func stopReasonClaude2OpenAI(reason string) string {
 	}
 }
 
-func RequestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *ClaudeRequest {
+func RequestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *dto.ClaudeRequest {
 
-	claudeRequest := ClaudeRequest{
+	claudeRequest := dto.ClaudeRequest{
 		Model:         textRequest.Model,
 		Prompt:        "",
 		StopSequences: nil,
@@ -60,12 +61,12 @@ func RequestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *ClaudeR
 	return &claudeRequest
 }
 
-func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeRequest, error) {
-	claudeTools := make([]Tool, 0, len(textRequest.Tools))
+func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*dto.ClaudeRequest, error) {
+	claudeTools := make([]dto.Tool, 0, len(textRequest.Tools))
 
 	for _, tool := range textRequest.Tools {
 		if params, ok := tool.Function.Parameters.(map[string]any); ok {
-			claudeTool := Tool{
+			claudeTool := dto.Tool{
 				Name:        tool.Function.Name,
 				Description: tool.Function.Description,
 			}
@@ -83,7 +84,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 		}
 	}
 
-	claudeRequest := ClaudeRequest{
+	claudeRequest := dto.ClaudeRequest{
 		Model:         textRequest.Model,
 		MaxTokens:     textRequest.MaxTokens,
 		StopSequences: nil,
@@ -107,7 +108,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 		}
 
 		// BudgetTokens 为 max_tokens 的 80%
-		claudeRequest.Thinking = &Thinking{
+		claudeRequest.Thinking = &dto.Thinking{
 			Type:         "enabled",
 			BudgetTokens: int(float64(claudeRequest.MaxTokens) * model_setting.GetClaudeSettings().ThinkingAdapterBudgetTokensPercentage),
 		}
@@ -165,7 +166,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 		lastMessage = fmtMessage
 	}
 
-	claudeMessages := make([]ClaudeMessage, 0)
+	claudeMessages := make([]dto.ClaudeMessage, 0)
 	isFirstMessage := true
 	for _, message := range formatMessages {
 		if message.Role == "system" {
@@ -186,63 +187,63 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 				isFirstMessage = false
 				if message.Role != "user" {
 					// fix: first message is assistant, add user message
-					claudeMessage := ClaudeMessage{
+					claudeMessage := dto.ClaudeMessage{
 						Role: "user",
-						Content: []ClaudeMediaMessage{
+						Content: []dto.ClaudeMediaMessage{
 							{
 								Type: "text",
-								Text: "...",
+								Text: common.GetPointer[string]("..."),
 							},
 						},
 					}
 					claudeMessages = append(claudeMessages, claudeMessage)
 				}
 			}
-			claudeMessage := ClaudeMessage{
+			claudeMessage := dto.ClaudeMessage{
 				Role: message.Role,
 			}
 			if message.Role == "tool" {
 				if len(claudeMessages) > 0 && claudeMessages[len(claudeMessages)-1].Role == "user" {
 					lastMessage := claudeMessages[len(claudeMessages)-1]
 					if content, ok := lastMessage.Content.(string); ok {
-						lastMessage.Content = []ClaudeMediaMessage{
+						lastMessage.Content = []dto.ClaudeMediaMessage{
 							{
 								Type: "text",
-								Text: content,
+								Text: common.GetPointer[string](content),
 							},
 						}
 					}
-					lastMessage.Content = append(lastMessage.Content.([]ClaudeMediaMessage), ClaudeMediaMessage{
+					lastMessage.Content = append(lastMessage.Content.([]dto.ClaudeMediaMessage), dto.ClaudeMediaMessage{
 						Type:      "tool_result",
 						ToolUseId: message.ToolCallId,
-						Content:   message.StringContent(),
+						Content:   message.Content,
 					})
 					claudeMessages[len(claudeMessages)-1] = lastMessage
 					continue
 				} else {
 					claudeMessage.Role = "user"
-					claudeMessage.Content = []ClaudeMediaMessage{
+					claudeMessage.Content = []dto.ClaudeMediaMessage{
 						{
 							Type:      "tool_result",
 							ToolUseId: message.ToolCallId,
-							Content:   message.StringContent(),
+							Content:   message.Content,
 						},
 					}
 				}
 			} else if message.IsStringContent() && message.ToolCalls == nil {
 				claudeMessage.Content = message.StringContent()
 			} else {
-				claudeMediaMessages := make([]ClaudeMediaMessage, 0)
+				claudeMediaMessages := make([]dto.ClaudeMediaMessage, 0)
 				for _, mediaMessage := range message.ParseContent() {
-					claudeMediaMessage := ClaudeMediaMessage{
+					claudeMediaMessage := dto.ClaudeMediaMessage{
 						Type: mediaMessage.Type,
 					}
 					if mediaMessage.Type == "text" {
-						claudeMediaMessage.Text = mediaMessage.Text
+						claudeMediaMessage.Text = common.GetPointer[string](mediaMessage.Text)
 					} else {
 						imageUrl := mediaMessage.ImageUrl.(dto.MessageImageUrl)
 						claudeMediaMessage.Type = "image"
-						claudeMediaMessage.Source = &ClaudeMessageSource{
+						claudeMediaMessage.Source = &dto.ClaudeMessageSource{
 							Type: "base64",
 						}
 						// 判断是否是url
@@ -272,7 +273,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 							common.SysError("tool call function arguments is not a map[string]any: " + fmt.Sprintf("%v", toolCall.Function.Arguments))
 							continue
 						}
-						claudeMediaMessages = append(claudeMediaMessages, ClaudeMediaMessage{
+						claudeMediaMessages = append(claudeMediaMessages, dto.ClaudeMediaMessage{
 							Type:  "tool_use",
 							Id:    toolCall.ID,
 							Name:  toolCall.Function.Name,
@@ -290,9 +291,8 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 	return &claudeRequest, nil
 }
 
-func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*dto.ChatCompletionsStreamResponse, *ClaudeUsage) {
+func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *dto.ClaudeResponse) *dto.ChatCompletionsStreamResponse {
 	var response dto.ChatCompletionsStreamResponse
-	var claudeUsage *ClaudeUsage
 	response.Object = "chat.completion.chunk"
 	response.Model = claudeResponse.Model
 	response.Choices = make([]dto.ChatCompletionsStreamResponseChoice, 0)
@@ -308,7 +308,7 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*
 		if claudeResponse.Type == "message_start" {
 			response.Id = claudeResponse.Message.Id
 			response.Model = claudeResponse.Message.Model
-			claudeUsage = &claudeResponse.Message.Usage
+			//claudeUsage = &claudeResponse.Message.Usage
 			choice.Delta.SetContentString("")
 			choice.Delta.Role = "assistant"
 		} else if claudeResponse.Type == "content_block_start" {
@@ -325,17 +325,17 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*
 					})
 				}
 			} else {
-				return nil, nil
+				return nil
 			}
 		} else if claudeResponse.Type == "content_block_delta" {
 			if claudeResponse.Delta != nil {
-				choice.Index = claudeResponse.Index
-				choice.Delta.SetContentString(claudeResponse.Delta.Text)
+				choice.Index = *claudeResponse.Index
+				choice.Delta.Content = claudeResponse.Delta.Text
 				switch claudeResponse.Delta.Type {
 				case "input_json_delta":
 					tools = append(tools, dto.ToolCallResponse{
 						Function: dto.FunctionResponse{
-							Arguments: claudeResponse.Delta.PartialJson,
+							Arguments: *claudeResponse.Delta.PartialJson,
 						},
 					})
 				case "signature_delta":
@@ -352,26 +352,23 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*
 			if finishReason != "null" {
 				choice.FinishReason = &finishReason
 			}
-			claudeUsage = &claudeResponse.Usage
+			//claudeUsage = &claudeResponse.Usage
 		} else if claudeResponse.Type == "message_stop" {
-			return nil, nil
+			return nil
 		} else {
-			return nil, nil
+			return nil
 		}
 	}
-	if claudeUsage == nil {
-		claudeUsage = &ClaudeUsage{}
-	}
 	if len(tools) > 0 {
 		choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
 		choice.Delta.ToolCalls = tools
 	}
 	response.Choices = append(response.Choices, choice)
 
-	return &response, claudeUsage
+	return &response
 }
 
-func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.OpenAITextResponse {
+func ResponseClaude2OpenAI(reqMode int, claudeResponse *dto.ClaudeResponse) *dto.OpenAITextResponse {
 	choices := make([]dto.OpenAITextResponseChoice, 0)
 	fullTextResponse := dto.OpenAITextResponse{
 		Id:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
@@ -379,8 +376,10 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 		Created: common.GetTimestamp(),
 	}
 	var responseText string
+	var responseThinking string
 	if len(claudeResponse.Content) > 0 {
-		responseText = claudeResponse.Content[0].Text
+		responseText = claudeResponse.Content[0].GetText()
+		responseThinking = claudeResponse.Content[0].Thinking
 	}
 	tools := make([]dto.ToolCallResponse, 0)
 	thinkingContent := ""
@@ -415,7 +414,7 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 				// 加密的不管, 只输出明文的推理过程
 				thinkingContent = message.Thinking
 			case "text":
-				responseText = message.Text
+				responseText = *message.Text
 			}
 		}
 	}
@@ -427,6 +426,9 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
 	}
 	choice.SetStringContent(responseText)
+	if len(responseThinking) > 0 {
+		choice.ReasoningContent = responseThinking
+	}
 	if len(tools) > 0 {
 		choice.Message.SetToolCalls(tools)
 	}
@@ -437,49 +439,124 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 	return &fullTextResponse
 }
 
+type ClaudeResponseInfo struct {
+	ResponseId   string
+	Created      int64
+	Model        string
+	ResponseText strings.Builder
+	Usage        *dto.Usage
+}
+
+func FormatClaudeResponseInfo(requestMode int, claudeResponse *dto.ClaudeResponse, oaiResponse *dto.ChatCompletionsStreamResponse, claudeInfo *ClaudeResponseInfo) bool {
+	if requestMode == RequestModeCompletion {
+		claudeInfo.ResponseText.WriteString(claudeResponse.Completion)
+	} else {
+		if claudeResponse.Type == "message_start" {
+			// message_start, 获取usage
+			claudeInfo.ResponseId = claudeResponse.Message.Id
+			claudeInfo.Model = claudeResponse.Message.Model
+			claudeInfo.Usage.PromptTokens = claudeResponse.Message.Usage.InputTokens
+		} else if claudeResponse.Type == "content_block_delta" {
+			if claudeResponse.Delta.Text != nil {
+				claudeInfo.ResponseText.WriteString(*claudeResponse.Delta.Text)
+			}
+		} else if claudeResponse.Type == "message_delta" {
+			claudeInfo.Usage.CompletionTokens = claudeResponse.Usage.OutputTokens
+			if claudeResponse.Usage.InputTokens > 0 {
+				claudeInfo.Usage.PromptTokens = claudeResponse.Usage.InputTokens
+			}
+			claudeInfo.Usage.TotalTokens = claudeInfo.Usage.PromptTokens + claudeResponse.Usage.OutputTokens
+		} else if claudeResponse.Type == "content_block_start" {
+		} else {
+			return false
+		}
+	}
+	if oaiResponse != nil {
+		oaiResponse.Id = claudeInfo.ResponseId
+		oaiResponse.Created = claudeInfo.Created
+		oaiResponse.Model = claudeInfo.Model
+	}
+	return true
+}
+
 func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, requestMode int) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
-	var usage *dto.Usage
-	usage = &dto.Usage{}
-	responseText := ""
-	createdTime := common.GetTimestamp()
+
+	if info.RelayFormat == relaycommon.RelayFormatOpenAI {
+		return toOpenAIStreamHandler(c, resp, info, requestMode)
+	}
+
+	usage := &dto.Usage{}
+	responseText := strings.Builder{}
 
 	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
-		var claudeResponse ClaudeResponse
-		err := json.Unmarshal([]byte(data), &claudeResponse)
+		var claudeResponse dto.ClaudeResponse
+		err := json.NewDecoder(bytes.NewReader(common.StringToByteSlice(data))).Decode(&claudeResponse)
 		if err != nil {
 			common.SysError("error unmarshalling stream response: " + err.Error())
 			return true
 		}
-
-		response, claudeUsage := StreamResponseClaude2OpenAI(requestMode, &claudeResponse)
-		if response == nil {
-			return true
-		}
 		if requestMode == RequestModeCompletion {
-			responseText += claudeResponse.Completion
-			responseId = response.Id
+			responseText.WriteString(claudeResponse.Completion)
 		} else {
 			if claudeResponse.Type == "message_start" {
 				// message_start, 获取usage
-				responseId = claudeResponse.Message.Id
 				info.UpstreamModelName = claudeResponse.Message.Model
-				usage.PromptTokens = claudeUsage.InputTokens
+				usage.PromptTokens = claudeResponse.Message.Usage.InputTokens
+				usage.PromptTokensDetails.CachedTokens = claudeResponse.Message.Usage.CacheReadInputTokens
+				usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Message.Usage.CacheCreationInputTokens
+				usage.CompletionTokens = claudeResponse.Message.Usage.OutputTokens
 			} else if claudeResponse.Type == "content_block_delta" {
-				responseText += claudeResponse.Delta.Text
+				responseText.WriteString(claudeResponse.Delta.GetText())
 			} else if claudeResponse.Type == "message_delta" {
-				usage.CompletionTokens = claudeUsage.OutputTokens
-				usage.TotalTokens = claudeUsage.InputTokens + claudeUsage.OutputTokens
-			} else if claudeResponse.Type == "content_block_start" {
-				return true
-			} else {
-				return true
+				if claudeResponse.Usage.InputTokens > 0 {
+					// 不叠加,只取最新的
+					usage.PromptTokens = claudeResponse.Usage.InputTokens
+				}
+				usage.CompletionTokens = claudeResponse.Usage.OutputTokens
+				usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
 			}
 		}
-		//response.Id = responseId
-		response.Id = responseId
-		response.Created = createdTime
-		response.Model = info.UpstreamModelName
+		helper.ClaudeChunkData(c, claudeResponse, data)
+		return true
+	})
+
+	if requestMode == RequestModeCompletion {
+		usage, _ = service.ResponseText2Usage(responseText.String(), info.UpstreamModelName, info.PromptTokens)
+	} else {
+		// 说明流模式建立失败,可能为官方出错
+		if usage.PromptTokens == 0 {
+			//usage.PromptTokens = info.PromptTokens
+		}
+		if usage.CompletionTokens == 0 {
+			usage, _ = service.ResponseText2Usage(responseText.String(), info.UpstreamModelName, usage.PromptTokens)
+		}
+	}
+	return nil, usage
+}
+
+func toOpenAIStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, requestMode int) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
+	claudeInfo := &ClaudeResponseInfo{
+		ResponseId:   responseId,
+		Created:      common.GetTimestamp(),
+		Model:        info.UpstreamModelName,
+		ResponseText: strings.Builder{},
+		Usage:        &dto.Usage{},
+	}
+
+	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
+		var claudeResponse dto.ClaudeResponse
+		err := json.NewDecoder(bytes.NewReader(common.StringToByteSlice(data))).Decode(&claudeResponse)
+		if err != nil {
+			common.SysError("error unmarshalling stream response: " + err.Error())
+			return true
+		}
+
+		response := StreamResponseClaude2OpenAI(requestMode, &claudeResponse)
+
+		if !FormatClaudeResponseInfo(requestMode, &claudeResponse, response, claudeInfo) {
+			return true
+		}
 
 		err = helper.ObjectData(c, response)
 		if err != nil {
@@ -489,25 +566,24 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
 	})
 
 	if requestMode == RequestModeCompletion {
-		usage, _ = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
+		claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, info.PromptTokens)
 	} else {
-		if usage.PromptTokens == 0 {
-			usage.PromptTokens = info.PromptTokens
+		if claudeInfo.Usage.PromptTokens == 0 {
+			//上游出错
 		}
-		if usage.CompletionTokens == 0 {
-			usage, _ = service.ResponseText2Usage(responseText, info.UpstreamModelName, usage.PromptTokens)
+		if claudeInfo.Usage.CompletionTokens == 0 {
+			claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
 		}
 	}
 	if info.ShouldIncludeUsage {
-		response := helper.GenerateFinalUsageResponse(responseId, createdTime, info.UpstreamModelName, *usage)
+		response := helper.GenerateFinalUsageResponse(responseId, claudeInfo.Created, info.UpstreamModelName, *claudeInfo.Usage)
 		err := helper.ObjectData(c, response)
 		if err != nil {
 			common.SysError("send final response failed: " + err.Error())
 		}
 	}
 	helper.Done(c)
-	//resp.Body.Close()
-	return nil, usage
+	return nil, claudeInfo.Usage
 }
 
 func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
@@ -519,7 +595,10 @@ func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *r
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
-	var claudeResponse ClaudeResponse
+	if common.DebugEnabled {
+		println("responseBody: ", string(responseBody))
+	}
+	var claudeResponse dto.ClaudeResponse
 	err = json.Unmarshal(responseBody, &claudeResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -535,13 +614,12 @@ func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *r
 			StatusCode: resp.StatusCode,
 		}, nil
 	}
-	fullTextResponse := ResponseClaude2OpenAI(requestMode, &claudeResponse)
-	completionTokens, err := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError), nil
-	}
 	usage := dto.Usage{}
 	if requestMode == RequestModeCompletion {
+		completionTokens, err := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
+		if err != nil {
+			return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError), nil
+		}
 		usage.PromptTokens = info.PromptTokens
 		usage.CompletionTokens = completionTokens
 		usage.TotalTokens = info.PromptTokens + completionTokens
@@ -549,14 +627,23 @@ func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *r
 		usage.PromptTokens = claudeResponse.Usage.InputTokens
 		usage.CompletionTokens = claudeResponse.Usage.OutputTokens
 		usage.TotalTokens = claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens
+		usage.PromptTokensDetails.CachedTokens = claudeResponse.Usage.CacheReadInputTokens
+		usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Usage.CacheCreationInputTokens
 	}
-	fullTextResponse.Usage = usage
-	jsonResponse, err := json.Marshal(fullTextResponse)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+	var responseData []byte
+	switch info.RelayFormat {
+	case relaycommon.RelayFormatOpenAI:
+		openaiResponse := ResponseClaude2OpenAI(requestMode, &claudeResponse)
+		openaiResponse.Usage = usage
+		responseData, err = json.Marshal(openaiResponse)
+		if err != nil {
+			return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+		}
+	case relaycommon.RelayFormatClaude:
+		responseData = responseBody
 	}
 	c.Writer.Header().Set("Content-Type", "application/json")
 	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = c.Writer.Write(jsonResponse)
+	_, err = c.Writer.Write(responseData)
 	return nil, &usage
 }

+ 7 - 1
relay/channel/cloudflare/adaptor.go

@@ -17,6 +17,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 }
 
@@ -37,7 +43,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}

+ 7 - 2
relay/channel/cohere/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -42,7 +48,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	return requestOpenAI2Cohere(*request), nil
 }
 
@@ -59,7 +65,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
 	if info.RelayMode == constant.RelayModeRerank {
 		err, usage = cohereRerankHandler(c, resp, info)

+ 8 - 2
relay/channel/deepseek/adaptor.go

@@ -16,6 +16,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -44,7 +50,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -68,7 +74,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = openai.OaiStreamHandler(c, resp, info)
 	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 17 - 11
relay/channel/dify/adaptor.go

@@ -9,7 +9,6 @@ import (
 	"one-api/dto"
 	"one-api/relay/channel"
 	relaycommon "one-api/relay/common"
-	"strings"
 )
 
 const (
@@ -23,6 +22,12 @@ type Adaptor struct {
 	BotType int
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -34,15 +39,16 @@ func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInf
 }
 
 func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
-	if strings.HasPrefix(info.UpstreamModelName, "agent") {
-		a.BotType = BotTypeAgent
-	} else if strings.HasPrefix(info.UpstreamModelName, "workflow") {
-		a.BotType = BotTypeWorkFlow
-	} else if strings.HasPrefix(info.UpstreamModelName, "chat") {
-		a.BotType = BotTypeCompletion
-	} else {
-		a.BotType = BotTypeChatFlow
-	}
+	//if strings.HasPrefix(info.UpstreamModelName, "agent") {
+	//	a.BotType = BotTypeAgent
+	//} else if strings.HasPrefix(info.UpstreamModelName, "workflow") {
+	//	a.BotType = BotTypeWorkFlow
+	//} else if strings.HasPrefix(info.UpstreamModelName, "chat") {
+	//	a.BotType = BotTypeCompletion
+	//} else {
+	//}
+	a.BotType = BotTypeChatFlow
+
 }
 
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
@@ -64,7 +70,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}

+ 7 - 1
relay/channel/gemini/adaptor.go

@@ -21,6 +21,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -89,7 +95,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}

+ 11 - 3
relay/channel/jina/adaptor.go

@@ -8,13 +8,21 @@ import (
 	"net/http"
 	"one-api/dto"
 	"one-api/relay/channel"
+	"one-api/relay/channel/openai"
 	relaycommon "one-api/relay/common"
+	"one-api/relay/common_handler"
 	"one-api/relay/constant"
 )
 
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -43,7 +51,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	return request, nil
 }
 
@@ -61,9 +69,9 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
 	if info.RelayMode == constant.RelayModeRerank {
-		err, usage = JinaRerankHandler(c, resp)
+		err, usage = common_handler.RerankHandler(c, resp)
 	} else if info.RelayMode == constant.RelayModeEmbeddings {
-		err, usage = jinaEmbeddingHandler(c, resp)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 0 - 59
relay/channel/jina/relay-jina.go

@@ -1,60 +1 @@
 package jina
-
-import (
-	"encoding/json"
-	"github.com/gin-gonic/gin"
-	"io"
-	"net/http"
-	"one-api/dto"
-	"one-api/service"
-)
-
-func JinaRerankHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	responseBody, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	var jinaResp dto.RerankResponse
-	err = json.Unmarshal(responseBody, &jinaResp)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-	}
-
-	jsonResponse, err := json.Marshal(jinaResp)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
-	}
-	c.Writer.Header().Set("Content-Type", "application/json")
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = c.Writer.Write(jsonResponse)
-	return nil, &jinaResp.Usage
-}
-
-func jinaEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	responseBody, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	var jinaResp dto.OpenAIEmbeddingResponse
-	err = json.Unmarshal(responseBody, &jinaResp)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-	}
-
-	jsonResponse, err := json.Marshal(jinaResp)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
-	}
-	c.Writer.Header().Set("Content-Type", "application/json")
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = c.Writer.Write(jsonResponse)
-	return nil, &jinaResp.Usage
-}

+ 8 - 2
relay/channel/mistral/adaptor.go

@@ -14,6 +14,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -37,7 +43,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -61,7 +67,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = openai.OaiStreamHandler(c, resp, info)
 	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 9 - 3
relay/channel/mokaai/adaptor.go

@@ -16,6 +16,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -51,7 +57,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -73,13 +79,13 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
 }
 
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
-		
+
 	switch info.RelayMode {
 	case constant.RelayModeEmbeddings:
 		err, usage = mokaEmbeddingHandler(c, resp)
 	default:
 		// err, usage = mokaHandler(c, resp)
-		
+
 	}
 	return
 }

+ 8 - 2
relay/channel/ollama/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -43,7 +49,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -69,7 +75,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		if info.RelayMode == relayconstant.RelayModeEmbeddings {
 			err, usage = ollamaEmbeddingHandler(c, resp, info.PromptTokens, info.UpstreamModelName, info.RelayMode)
 		} else {
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	}
 	return

+ 49 - 9
relay/channel/openai/adaptor.go

@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -14,13 +13,18 @@ import (
 	"one-api/dto"
 	"one-api/relay/channel"
 	"one-api/relay/channel/ai360"
-	"one-api/relay/channel/jina"
 	"one-api/relay/channel/lingyiwanwu"
 	"one-api/relay/channel/minimax"
 	"one-api/relay/channel/moonshot"
+	"one-api/relay/channel/openrouter"
+	"one-api/relay/channel/xinference"
 	relaycommon "one-api/relay/common"
+	"one-api/relay/common_handler"
 	"one-api/relay/constant"
+	"one-api/service"
 	"strings"
+
+	"github.com/gin-gonic/gin"
 )
 
 type Adaptor struct {
@@ -28,11 +32,39 @@ type Adaptor struct {
 	ResponseFormat string
 }
 
+func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
+	if !strings.Contains(request.Model, "claude") {
+		return nil, fmt.Errorf("you are using openai channel type with path /v1/messages, only claude model supported convert, but got %s", request.Model)
+	}
+	aiRequest, err := service.ClaudeToOpenAIRequest(*request)
+	if err != nil {
+		return nil, err
+	}
+	if info.SupportStreamOptions {
+		aiRequest.StreamOptions = &dto.StreamOptions{
+			IncludeUsage: true,
+		}
+	}
+	return a.ConvertOpenAIRequest(c, info, aiRequest)
+}
+
 func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 	a.ChannelType = info.ChannelType
+
+	// initialize ThinkingContentInfo when thinking_to_content is enabled
+	if think2Content, ok := info.ChannelSetting[constant2.ChannelSettingThinkingToContent].(bool); ok && think2Content {
+		info.ThinkingContentInfo = relaycommon.ThinkingContentInfo{
+			IsFirstThinkingContent:  true,
+			SendLastThinkingContent: false,
+			HasSentThinkingContent:  false,
+		}
+	}
 }
 
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
+	if info.RelayFormat == relaycommon.RelayFormatClaude {
+		return fmt.Sprintf("%s/v1/chat/completions", info.BaseUrl), nil
+	}
 	if info.RelayMode == constant.RelayModeRealtime {
 		if strings.HasPrefix(info.BaseUrl, "https://") {
 			baseUrl := strings.TrimPrefix(info.BaseUrl, "https://")
@@ -101,14 +133,14 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, header *http.Header, info *
 	} else {
 		header.Set("Authorization", "Bearer "+info.ApiKey)
 	}
-	//if info.ChannelType == common.ChannelTypeOpenRouter {
-	//	req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
-	//	req.Header.Set("X-Title", "One API")
-	//}
+	if info.ChannelType == common.ChannelTypeOpenRouter {
+		header.Set("HTTP-Referer", "https://github.com/Calcium-Ion/new-api")
+		header.Set("X-Title", "New API")
+	}
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -230,12 +262,12 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	case constant.RelayModeImagesGenerations:
 		err, usage = OpenaiTTSHandler(c, resp, info)
 	case constant.RelayModeRerank:
-		err, usage = jina.JinaRerankHandler(c, resp)
+		err, usage = common_handler.RerankHandler(c, resp)
 	default:
 		if info.IsStream {
 			err, usage = OaiStreamHandler(c, resp, info)
 		} else {
-			err, usage = OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = OpenaiHandler(c, resp, info)
 		}
 	}
 	return
@@ -251,6 +283,10 @@ func (a *Adaptor) GetModelList() []string {
 		return lingyiwanwu.ModelList
 	case common.ChannelTypeMiniMax:
 		return minimax.ModelList
+	case common.ChannelTypeXinference:
+		return xinference.ModelList
+	case common.ChannelTypeOpenRouter:
+		return openrouter.ModelList
 	default:
 		return ModelList
 	}
@@ -266,6 +302,10 @@ func (a *Adaptor) GetChannelName() string {
 		return lingyiwanwu.ChannelName
 	case common.ChannelTypeMiniMax:
 		return minimax.ChannelName
+	case common.ChannelTypeXinference:
+		return xinference.ChannelName
+	case common.ChannelTypeOpenRouter:
+		return openrouter.ChannelName
 	default:
 		return ChannelName
 	}

+ 188 - 0
relay/channel/openai/helper.go

@@ -0,0 +1,188 @@
+package openai
+
+import (
+	"encoding/json"
+	"one-api/common"
+	"one-api/dto"
+	relaycommon "one-api/relay/common"
+	relayconstant "one-api/relay/constant"
+	"one-api/relay/helper"
+	"one-api/service"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+
+// 辅助函数
+func handleStreamFormat(c *gin.Context, info *relaycommon.RelayInfo, data string, forceFormat bool, thinkToContent bool) error {
+	info.SendResponseCount++
+	switch info.RelayFormat {
+	case relaycommon.RelayFormatOpenAI:
+		return sendStreamData(c, info, data, forceFormat, thinkToContent)
+	case relaycommon.RelayFormatClaude:
+		return handleClaudeFormat(c, data, info)
+	}
+	return nil
+}
+
+func handleClaudeFormat(c *gin.Context, data string, info *relaycommon.RelayInfo) error {
+	var streamResponse dto.ChatCompletionsStreamResponse
+	if err := json.Unmarshal(common.StringToByteSlice(data), &streamResponse); err != nil {
+		return err
+	}
+
+	claudeResponses := service.StreamResponseOpenAI2Claude(&streamResponse, info)
+	for _, resp := range claudeResponses {
+		helper.ClaudeData(c, *resp)
+	}
+	return nil
+}
+
+func processStreamResponse(item string, responseTextBuilder *strings.Builder, toolCount *int) error {
+	var streamResponse dto.ChatCompletionsStreamResponse
+	if err := json.Unmarshal(common.StringToByteSlice(item), &streamResponse); err != nil {
+		return err
+	}
+
+	for _, choice := range streamResponse.Choices {
+		responseTextBuilder.WriteString(choice.Delta.GetContentString())
+		responseTextBuilder.WriteString(choice.Delta.GetReasoningContent())
+		if choice.Delta.ToolCalls != nil {
+			if len(choice.Delta.ToolCalls) > *toolCount {
+				*toolCount = len(choice.Delta.ToolCalls)
+			}
+			for _, tool := range choice.Delta.ToolCalls {
+				responseTextBuilder.WriteString(tool.Function.Name)
+				responseTextBuilder.WriteString(tool.Function.Arguments)
+			}
+		}
+	}
+	return nil
+}
+
+func processTokens(relayMode int, streamItems []string, responseTextBuilder *strings.Builder, toolCount *int) error {
+	streamResp := "[" + strings.Join(streamItems, ",") + "]"
+
+	switch relayMode {
+	case relayconstant.RelayModeChatCompletions:
+		return processChatCompletions(streamResp, streamItems, responseTextBuilder, toolCount)
+	case relayconstant.RelayModeCompletions:
+		return processCompletions(streamResp, streamItems, responseTextBuilder)
+	}
+	return nil
+}
+
+func processChatCompletions(streamResp string, streamItems []string, responseTextBuilder *strings.Builder, toolCount *int) error {
+	var streamResponses []dto.ChatCompletionsStreamResponse
+	if err := json.Unmarshal(common.StringToByteSlice(streamResp), &streamResponses); err != nil {
+		// 一次性解析失败,逐个解析
+		common.SysError("error unmarshalling stream response: " + err.Error())
+		for _, item := range streamItems {
+			if err := processStreamResponse(item, responseTextBuilder, toolCount); err != nil {
+				common.SysError("error processing stream response: " + err.Error())
+			}
+		}
+		return nil
+	}
+
+	// 批量处理所有响应
+	for _, streamResponse := range streamResponses {
+		for _, choice := range streamResponse.Choices {
+			responseTextBuilder.WriteString(choice.Delta.GetContentString())
+			responseTextBuilder.WriteString(choice.Delta.GetReasoningContent())
+			if choice.Delta.ToolCalls != nil {
+				if len(choice.Delta.ToolCalls) > *toolCount {
+					*toolCount = len(choice.Delta.ToolCalls)
+				}
+				for _, tool := range choice.Delta.ToolCalls {
+					responseTextBuilder.WriteString(tool.Function.Name)
+					responseTextBuilder.WriteString(tool.Function.Arguments)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func processCompletions(streamResp string, streamItems []string, responseTextBuilder *strings.Builder) error {
+	var streamResponses []dto.CompletionsStreamResponse
+	if err := json.Unmarshal(common.StringToByteSlice(streamResp), &streamResponses); err != nil {
+		// 一次性解析失败,逐个解析
+		common.SysError("error unmarshalling stream response: " + err.Error())
+		for _, item := range streamItems {
+			var streamResponse dto.CompletionsStreamResponse
+			if err := json.Unmarshal(common.StringToByteSlice(item), &streamResponse); err != nil {
+				continue
+			}
+			for _, choice := range streamResponse.Choices {
+				responseTextBuilder.WriteString(choice.Text)
+			}
+		}
+		return nil
+	}
+
+	// 批量处理所有响应
+	for _, streamResponse := range streamResponses {
+		for _, choice := range streamResponse.Choices {
+			responseTextBuilder.WriteString(choice.Text)
+		}
+	}
+	return nil
+}
+
+func handleLastResponse(lastStreamData string, responseId *string, createAt *int64,
+	systemFingerprint *string, model *string, usage **dto.Usage,
+	containStreamUsage *bool, info *relaycommon.RelayInfo,
+	shouldSendLastResp *bool) error {
+
+	var lastStreamResponse dto.ChatCompletionsStreamResponse
+	if err := json.Unmarshal(common.StringToByteSlice(lastStreamData), &lastStreamResponse); err != nil {
+		return err
+	}
+
+	*responseId = lastStreamResponse.Id
+	*createAt = lastStreamResponse.Created
+	*systemFingerprint = lastStreamResponse.GetSystemFingerprint()
+	*model = lastStreamResponse.Model
+
+	if service.ValidUsage(lastStreamResponse.Usage) {
+		*containStreamUsage = true
+		*usage = lastStreamResponse.Usage
+		if !info.ShouldIncludeUsage {
+			*shouldSendLastResp = false
+		}
+	}
+
+	return nil
+}
+
+func handleFinalResponse(c *gin.Context, info *relaycommon.RelayInfo, lastStreamData string,
+	responseId string, createAt int64, model string, systemFingerprint string,
+	usage *dto.Usage, containStreamUsage bool) {
+
+	switch info.RelayFormat {
+	case relaycommon.RelayFormatOpenAI:
+		if info.ShouldIncludeUsage && !containStreamUsage {
+			response := helper.GenerateFinalUsageResponse(responseId, createAt, model, *usage)
+			response.SetSystemFingerprint(systemFingerprint)
+			helper.ObjectData(c, response)
+		}
+		helper.Done(c)
+
+	case relaycommon.RelayFormatClaude:
+		var streamResponse dto.ChatCompletionsStreamResponse
+		if err := json.Unmarshal(common.StringToByteSlice(lastStreamData), &streamResponse); err != nil {
+			common.SysError("error unmarshalling stream response: " + err.Error())
+			return
+		}
+
+		if !containStreamUsage {
+			streamResponse.Usage = usage
+		}
+
+		claudeResponses := service.StreamResponseOpenAI2Claude(&streamResponse, info)
+		for _, resp := range claudeResponses {
+			helper.ClaudeData(c, *resp)
+		}
+	}
+}

+ 15 - 94
relay/channel/openai/relay-openai.go

@@ -12,7 +12,6 @@ import (
 	"one-api/constant"
 	"one-api/dto"
 	relaycommon "one-api/relay/common"
-	relayconstant "one-api/relay/constant"
 	"one-api/relay/helper"
 	"one-api/service"
 	"os"
@@ -66,6 +65,7 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
 				response.Choices[i].Delta.Reasoning = nil
 			}
 			info.ThinkingContentInfo.IsFirstThinkingContent = false
+			info.ThinkingContentInfo.HasSentThinkingContent = true
 			return helper.ObjectData(c, response)
 		}
 	}
@@ -77,7 +77,8 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
 	// Process each choice
 	for i, choice := range lastStreamResponse.Choices {
 		// Handle transition from thinking to content
-		if hasContent && !info.ThinkingContentInfo.SendLastThinkingContent {
+		// only send `</think>` tag when previous thinking content has been sent
+		if hasContent && !info.ThinkingContentInfo.SendLastThinkingContent && info.ThinkingContentInfo.HasSentThinkingContent {
 			response := lastStreamResponse.Copy()
 			for j := range response.Choices {
 				response.Choices[j].Delta.SetContentString("\n</think>\n")
@@ -88,7 +89,7 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
 			helper.ObjectData(c, response)
 		}
 
-		// Convert reasoning content to regular content
+		// Convert reasoning content to regular content if any
 		if len(choice.Delta.GetReasoningContent()) > 0 {
 			lastStreamResponse.Choices[i].Delta.SetContentString(choice.Delta.GetReasoningContent())
 			lastStreamResponse.Choices[i].Delta.ReasoningContent = nil
@@ -137,10 +138,11 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 
 	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
 		if lastStreamData != "" {
-			err := sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
+			err := handleStreamFormat(c, info, lastStreamData, forceFormat, thinkToContent)
 			if err != nil {
-				common.LogError(c, "streaming error: "+err.Error())
+				common.SysError("error handling stream format: " + err.Error())
 			}
+			info.SetFirstResponseTime()
 		}
 		lastStreamData = data
 		streamItems = append(streamItems, data)
@@ -172,83 +174,9 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 		sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
 	}
 
-	// 计算token
-	streamResp := "[" + strings.Join(streamItems, ",") + "]"
-	switch info.RelayMode {
-	case relayconstant.RelayModeChatCompletions:
-		var streamResponses []dto.ChatCompletionsStreamResponse
-		err := json.Unmarshal(common.StringToByteSlice(streamResp), &streamResponses)
-		if err != nil {
-			// 一次性解析失败,逐个解析
-			common.SysError("error unmarshalling stream response: " + err.Error())
-			for _, item := range streamItems {
-				var streamResponse dto.ChatCompletionsStreamResponse
-				err := json.Unmarshal(common.StringToByteSlice(item), &streamResponse)
-				if err == nil {
-					//if service.ValidUsage(streamResponse.Usage) {
-					//	usage = streamResponse.Usage
-					//}
-					for _, choice := range streamResponse.Choices {
-						responseTextBuilder.WriteString(choice.Delta.GetContentString())
-
-						// handle both reasoning_content and reasoning
-						responseTextBuilder.WriteString(choice.Delta.GetReasoningContent())
-
-						if choice.Delta.ToolCalls != nil {
-							if len(choice.Delta.ToolCalls) > toolCount {
-								toolCount = len(choice.Delta.ToolCalls)
-							}
-							for _, tool := range choice.Delta.ToolCalls {
-								responseTextBuilder.WriteString(tool.Function.Name)
-								responseTextBuilder.WriteString(tool.Function.Arguments)
-							}
-						}
-					}
-				}
-			}
-		} else {
-			for _, streamResponse := range streamResponses {
-				//if service.ValidUsage(streamResponse.Usage) {
-				//	usage = streamResponse.Usage
-				//	containStreamUsage = true
-				//}
-				for _, choice := range streamResponse.Choices {
-					responseTextBuilder.WriteString(choice.Delta.GetContentString())
-					responseTextBuilder.WriteString(choice.Delta.GetReasoningContent()) // This will handle both reasoning_content and reasoning
-					if choice.Delta.ToolCalls != nil {
-						if len(choice.Delta.ToolCalls) > toolCount {
-							toolCount = len(choice.Delta.ToolCalls)
-						}
-						for _, tool := range choice.Delta.ToolCalls {
-							responseTextBuilder.WriteString(tool.Function.Name)
-							responseTextBuilder.WriteString(tool.Function.Arguments)
-						}
-					}
-				}
-			}
-		}
-	case relayconstant.RelayModeCompletions:
-		var streamResponses []dto.CompletionsStreamResponse
-		err := json.Unmarshal(common.StringToByteSlice(streamResp), &streamResponses)
-		if err != nil {
-			// 一次性解析失败,逐个解析
-			common.SysError("error unmarshalling stream response: " + err.Error())
-			for _, item := range streamItems {
-				var streamResponse dto.CompletionsStreamResponse
-				err := json.Unmarshal(common.StringToByteSlice(item), &streamResponse)
-				if err == nil {
-					for _, choice := range streamResponse.Choices {
-						responseTextBuilder.WriteString(choice.Text)
-					}
-				}
-			}
-		} else {
-			for _, streamResponse := range streamResponses {
-				for _, choice := range streamResponse.Choices {
-					responseTextBuilder.WriteString(choice.Text)
-				}
-			}
-		}
+	// 处理token计算
+	if err := processTokens(info.RelayMode, streamItems, &responseTextBuilder, &toolCount); err != nil {
+		common.SysError("error processing tokens: " + err.Error())
 	}
 
 	if !containStreamUsage {
@@ -262,19 +190,12 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 		}
 	}
 
-	if info.ShouldIncludeUsage && !containStreamUsage {
-		response := helper.GenerateFinalUsageResponse(responseId, createAt, model, *usage)
-		response.SetSystemFingerprint(systemFingerprint)
-		helper.ObjectData(c, response)
-	}
-
-	helper.Done(c)
+	handleFinalResponse(c, info, lastStreamData, responseId, createAt, model, systemFingerprint, usage, containStreamUsage)
 
-	//resp.Body.Close()
 	return nil, usage
 }
 
-func OpenaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	var simpleResponse dto.SimpleResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -312,13 +233,13 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
 	if simpleResponse.Usage.TotalTokens == 0 || (simpleResponse.Usage.PromptTokens == 0 && simpleResponse.Usage.CompletionTokens == 0) {
 		completionTokens := 0
 		for _, choice := range simpleResponse.Choices {
-			ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, model)
+			ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, info.UpstreamModelName)
 			completionTokens += ctkm
 		}
 		simpleResponse.Usage = dto.Usage{
-			PromptTokens:     promptTokens,
+			PromptTokens:     info.PromptTokens,
 			CompletionTokens: completionTokens,
-			TotalTokens:      promptTokens + completionTokens,
+			TotalTokens:      info.PromptTokens + completionTokens,
 		}
 	}
 	return nil, &simpleResponse.Usage

+ 0 - 74
relay/channel/openrouter/adaptor.go

@@ -1,74 +0,0 @@
-package openrouter
-
-import (
-	"errors"
-	"fmt"
-	"github.com/gin-gonic/gin"
-	"io"
-	"net/http"
-	"one-api/dto"
-	"one-api/relay/channel"
-	"one-api/relay/channel/openai"
-	relaycommon "one-api/relay/common"
-)
-
-type Adaptor struct {
-}
-
-func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
-	//TODO implement me
-	return nil, errors.New("not implemented")
-}
-
-func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
-	//TODO implement me
-	return nil, errors.New("not implemented")
-}
-
-func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
-}
-
-func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
-	return fmt.Sprintf("%s/v1/chat/completions", info.BaseUrl), nil
-}
-
-func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
-	channel.SetupApiRequestHeader(info, c, req)
-	req.Set("Authorization", fmt.Sprintf("Bearer %s", info.ApiKey))
-	req.Set("HTTP-Referer", "https://github.com/Calcium-Ion/new-api")
-	req.Set("X-Title", "New API")
-	return nil
-}
-
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
-	return request, nil
-}
-
-func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
-	return channel.DoApiRequest(a, c, info, requestBody)
-}
-
-func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
-	return nil, errors.New("not implemented")
-}
-
-func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
-	return nil, errors.New("not implemented")
-}
-
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
-	if info.IsStream {
-		err, usage = openai.OaiStreamHandler(c, resp, info)
-	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
-	}
-	return
-}
-
-func (a *Adaptor) GetModelList() []string {
-	return ModelList
-}
-
-func (a *Adaptor) GetChannelName() string {
-	return ChannelName
-}

+ 7 - 2
relay/channel/palm/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -38,7 +44,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -54,7 +60,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
 }

+ 8 - 3
relay/channel/perplexity/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -38,7 +44,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -57,7 +63,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
 }
@@ -66,7 +71,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = openai.OaiStreamHandler(c, resp, info)
 	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 10 - 4
relay/channel/siliconflow/adaptor.go

@@ -16,6 +16,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -48,7 +54,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	return request, nil
 }
 
@@ -72,16 +78,16 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		if info.IsStream {
 			err, usage = openai.OaiStreamHandler(c, resp, info)
 		} else {
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	case constant.RelayModeCompletions:
 		if info.IsStream {
 			err, usage = openai.OaiStreamHandler(c, resp, info)
 		} else {
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	case constant.RelayModeEmbeddings:
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 7 - 2
relay/channel/tencent/adaptor.go

@@ -23,6 +23,12 @@ type Adaptor struct {
 	Timestamp int64
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -52,7 +58,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -78,7 +84,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
 }

+ 5 - 2
relay/channel/vertex/adaptor.go

@@ -38,6 +38,9 @@ type Adaptor struct {
 	AccountCredentials Credentials
 }
 
+func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
+	return request, nil
+}
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -119,7 +122,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -175,7 +178,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		case RequestModeGemini:
 			err, usage = gemini.GeminiChatHandler(c, resp, info)
 		case RequestModeLlama:
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.OriginModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	}
 	return

+ 14 - 14
relay/channel/vertex/dto.go

@@ -1,25 +1,25 @@
 package vertex
 
 import (
-	"one-api/relay/channel/claude"
+	"one-api/dto"
 )
 
 type VertexAIClaudeRequest struct {
-	AnthropicVersion string                 `json:"anthropic_version"`
-	Messages         []claude.ClaudeMessage `json:"messages"`
-	System           any                    `json:"system,omitempty"`
-	MaxTokens        uint                   `json:"max_tokens,omitempty"`
-	StopSequences    []string               `json:"stop_sequences,omitempty"`
-	Stream           bool                   `json:"stream,omitempty"`
-	Temperature      *float64               `json:"temperature,omitempty"`
-	TopP             float64                `json:"top_p,omitempty"`
-	TopK             int                    `json:"top_k,omitempty"`
-	Tools            any                    `json:"tools,omitempty"`
-	ToolChoice       any                    `json:"tool_choice,omitempty"`
-	Thinking         *claude.Thinking       `json:"thinking,omitempty"`
+	AnthropicVersion string              `json:"anthropic_version"`
+	Messages         []dto.ClaudeMessage `json:"messages"`
+	System           any                 `json:"system,omitempty"`
+	MaxTokens        uint                `json:"max_tokens,omitempty"`
+	StopSequences    []string            `json:"stop_sequences,omitempty"`
+	Stream           bool                `json:"stream,omitempty"`
+	Temperature      *float64            `json:"temperature,omitempty"`
+	TopP             float64             `json:"top_p,omitempty"`
+	TopK             int                 `json:"top_k,omitempty"`
+	Tools            any                 `json:"tools,omitempty"`
+	ToolChoice       any                 `json:"tool_choice,omitempty"`
+	Thinking         *dto.Thinking       `json:"thinking,omitempty"`
 }
 
-func copyRequest(req *claude.ClaudeRequest, version string) *VertexAIClaudeRequest {
+func copyRequest(req *dto.ClaudeRequest, version string) *VertexAIClaudeRequest {
 	return &VertexAIClaudeRequest{
 		AnthropicVersion: version,
 		System:           req.System,

+ 9 - 3
relay/channel/volcengine/adaptor.go

@@ -17,6 +17,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -50,7 +56,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -75,10 +81,10 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		if info.IsStream {
 			err, usage = openai.OaiStreamHandler(c, resp, info)
 		} else {
-			err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+			err, usage = openai.OpenaiHandler(c, resp, info)
 		}
 	case constant.RelayModeEmbeddings:
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 7 - 0
relay/channel/xinference/constant.go

@@ -0,0 +1,7 @@
+package xinference
+
+var ModelList = []string{
+	"bge-reranker-v2-m3",
+}
+
+var ChannelName = "xinference"

+ 7 - 2
relay/channel/xunfei/adaptor.go

@@ -16,6 +16,12 @@ type Adaptor struct {
 	request *dto.GeneralOpenAIRequest
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -38,7 +44,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -55,7 +61,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	// xunfei's request is not http request, so we don't need to do anything here
 	dummyResp := &http.Response{}

+ 7 - 2
relay/channel/zhipu/adaptor.go

@@ -14,6 +14,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -42,7 +48,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -61,7 +67,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
 }

+ 8 - 3
relay/channel/zhipu_4v/adaptor.go

@@ -15,6 +15,12 @@ import (
 type Adaptor struct {
 }
 
+func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
+	//TODO implement me
+	panic("implement me")
+	return nil, nil
+}
+
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	//TODO implement me
 	return nil, errors.New("not implemented")
@@ -39,7 +45,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
+func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
@@ -58,7 +64,6 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return nil, errors.New("not implemented")
 }
 
-
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
 }
@@ -67,7 +72,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = openai.OaiStreamHandler(c, resp, info)
 	} else {
-		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
+		err, usage = openai.OpenaiHandler(c, resp, info)
 	}
 	return
 }

+ 163 - 0
relay/claude_handler.go

@@ -0,0 +1,163 @@
+package relay
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/gin-gonic/gin"
+	"io"
+	"net/http"
+	"one-api/common"
+	"one-api/dto"
+	relaycommon "one-api/relay/common"
+	"one-api/relay/helper"
+	"one-api/service"
+	"one-api/setting/model_setting"
+	"strings"
+)
+
+// getAndValidateClaudeRequest binds the JSON body of a /v1/messages call into
+// a dto.ClaudeRequest and checks the two fields the endpoint requires.
+//
+// Returns the parsed request, or an error suitable for a 400 response.
+func getAndValidateClaudeRequest(c *gin.Context) (textRequest *dto.ClaudeRequest, err error) {
+	textRequest = &dto.ClaudeRequest{}
+	err = c.ShouldBindJSON(textRequest)
+	if err != nil {
+		return nil, err
+	}
+	// len() is nil-safe for slices, so the former explicit nil check was redundant.
+	if len(textRequest.Messages) == 0 {
+		return nil, errors.New("field messages is required")
+	}
+	if textRequest.Model == "" {
+		return nil, errors.New("field model is required")
+	}
+	return textRequest, nil
+}
+
+// ClaudeHelper serves a native Claude /v1/messages request end to end:
+// validate, apply model mapping, count prompt tokens, pre-consume quota,
+// convert the request for the target channel, relay it upstream, and settle
+// the final bill. Every failure path is returned in the Claude error
+// envelope.
+func ClaudeHelper(c *gin.Context) (claudeError *dto.ClaudeErrorWithStatusCode) {
+
+	relayInfo := relaycommon.GenRelayInfoClaude(c)
+
+	// Bind and validate the incoming request body.
+	textRequest, err := getAndValidateClaudeRequest(c)
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "invalid_claude_request", http.StatusBadRequest)
+	}
+
+	if textRequest.Stream {
+		relayInfo.IsStream = true
+	}
+
+	// Apply the channel's model mapping before pricing and conversion.
+	err = helper.ModelMappedHelper(c, relayInfo)
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "model_mapped_error", http.StatusInternalServerError)
+	}
+
+	textRequest.Model = relayInfo.UpstreamModelName
+
+	promptTokens, err := getClaudePromptTokens(textRequest, relayInfo)
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "count_token_messages_failed", http.StatusInternalServerError)
+	}
+
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
+
+	// Pre-consume quota; the deferred block refunds it on any later failure.
+	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
+
+	if openaiErr != nil {
+		return service.OpenAIErrorToClaudeError(openaiErr)
+	}
+	defer func() {
+		if openaiErr != nil {
+			returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
+		}
+	}()
+
+	adaptor := GetAdaptor(relayInfo.ApiType)
+	if adaptor == nil {
+		return service.ClaudeErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
+	}
+	adaptor.Init(relayInfo)
+	var requestBody io.Reader
+
+	if textRequest.MaxTokens == 0 {
+		textRequest.MaxTokens = uint(model_setting.GetClaudeSettings().GetDefaultMaxTokens(textRequest.Model))
+	}
+
+	// "-thinking" model aliases: synthesize an extended-thinking config and
+	// strip the suffix before the request goes upstream.
+	if model_setting.GetClaudeSettings().ThinkingAdapterEnabled &&
+		strings.HasSuffix(textRequest.Model, "-thinking") {
+		if textRequest.Thinking == nil {
+			// BudgetTokens must exceed 1024, so enforce a floor on MaxTokens.
+			if textRequest.MaxTokens < 1280 {
+				textRequest.MaxTokens = 1280
+			}
+
+			// BudgetTokens is a configured percentage of MaxTokens.
+			textRequest.Thinking = &dto.Thinking{
+				Type:         "enabled",
+				BudgetTokens: int(float64(textRequest.MaxTokens) * model_setting.GetClaudeSettings().ThinkingAdapterBudgetTokensPercentage),
+			}
+			// Temporary workaround, see:
+			// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#important-considerations-when-using-extended-thinking
+			textRequest.TopP = 0
+			textRequest.Temperature = common.GetPointer[float64](1.0)
+		}
+		textRequest.Model = strings.TrimSuffix(textRequest.Model, "-thinking")
+		relayInfo.UpstreamModelName = textRequest.Model
+	}
+
+	convertedRequest, err := adaptor.ConvertClaudeRequest(c, relayInfo, textRequest)
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
+	}
+	jsonData, err := json.Marshal(convertedRequest)
+	if err != nil {
+		// Check the marshal error before touching jsonData (the previous code
+		// debug-printed the payload before looking at err).
+		return service.ClaudeErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
+	}
+	if common.DebugEnabled {
+		println("requestBody: ", string(jsonData))
+	}
+	requestBody = bytes.NewBuffer(jsonData)
+
+	statusCodeMappingStr := c.GetString("status_code_mapping")
+	var httpResp *http.Response
+	resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
+	if err != nil {
+		return service.ClaudeErrorWrapperLocal(err, "do_request_failed", http.StatusInternalServerError)
+	}
+
+	if resp != nil {
+		httpResp = resp.(*http.Response)
+		// Upstream may stream even when the client did not request it.
+		relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
+		if httpResp.StatusCode != http.StatusOK {
+			openaiErr = service.RelayErrorHandler(httpResp, false)
+			// Apply the channel's status-code mapping before replying.
+			service.ResetStatusCode(openaiErr, statusCodeMappingStr)
+			return service.OpenAIErrorToClaudeError(openaiErr)
+		}
+	}
+
+	usage, openaiErr := adaptor.DoResponse(c, httpResp, relayInfo)
+	if openaiErr != nil {
+		// Apply the channel's status-code mapping before replying.
+		service.ResetStatusCode(openaiErr, statusCodeMappingStr)
+		return service.OpenAIErrorToClaudeError(openaiErr)
+	}
+	service.PostClaudeConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
+	return nil
+}
+
+func getClaudePromptTokens(textRequest *dto.ClaudeRequest, info *relaycommon.RelayInfo) (int, error) {
+	var promptTokens int
+	var err error
+	switch info.RelayMode {
+	default:
+		promptTokens, err = service.CountTokenClaudeRequest(*textRequest, info.UpstreamModelName)
+	}
+	info.PromptTokens = promptTokens
+	return promptTokens, err
+}

+ 30 - 0
relay/common/relay_info.go

@@ -15,8 +15,24 @@ import (
 type ThinkingContentInfo struct {
 	IsFirstThinkingContent  bool
 	SendLastThinkingContent bool
+	HasSentThinkingContent  bool
 }
 
+const (
+	LastMessageTypeText  = "text"
+	LastMessageTypeTools = "tools"
+)
+
+type ClaudeConvertInfo struct {
+	LastMessagesType string
+	Index            int
+}
+
+const (
+	RelayFormatOpenAI = "openai"
+	RelayFormatClaude = "claude"
+)
+
 type RelayInfo struct {
 	ChannelType       int
 	ChannelId         int
@@ -58,7 +74,10 @@ type RelayInfo struct {
 	UserSetting          map[string]interface{}
 	UserEmail            string
 	UserQuota            int
+	RelayFormat          string
+	SendResponseCount    int
 	ThinkingContentInfo
+	ClaudeConvertInfo
 }
 
 // 定义支持流式选项的通道类型
@@ -82,6 +101,16 @@ func GenRelayInfoWs(c *gin.Context, ws *websocket.Conn) *RelayInfo {
 	return info
 }
 
+// GenRelayInfoClaude builds a RelayInfo for a native Claude (/v1/messages)
+// request: relay format switched to Claude, usage injection disabled, and
+// the stream-conversion state starting in "text" mode.
+func GenRelayInfoClaude(c *gin.Context) *RelayInfo {
+	info := GenRelayInfo(c)
+	info.RelayFormat = RelayFormatClaude
+	info.ShouldIncludeUsage = false
+	info.ClaudeConvertInfo = ClaudeConvertInfo{LastMessagesType: LastMessageTypeText}
+	return info
+}
+
 func GenRelayInfo(c *gin.Context) *RelayInfo {
 	channelType := c.GetInt("channel_type")
 	channelId := c.GetInt("channel_id")
@@ -123,6 +152,7 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
 		ApiKey:         strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
 		Organization:   c.GetString("channel_organization"),
 		ChannelSetting: channelSetting,
+		RelayFormat:    RelayFormatOpenAI,
 		ThinkingContentInfo: ThinkingContentInfo{
 			IsFirstThinkingContent:  true,
 			SendLastThinkingContent: false,

+ 35 - 0
relay/common_handler/rerank.go

@@ -0,0 +1,35 @@
+package common_handler
+
+import (
+	"encoding/json"
+	"github.com/gin-gonic/gin"
+	"io"
+	"net/http"
+	"one-api/dto"
+	"one-api/service"
+)
+
+// RerankHandler relays an upstream rerank response to the client (decode +
+// re-encode keeps only the known RerankResponse fields) and extracts the
+// usage block for billing.
+//
+// Returns a wrapped OpenAI-style error, or the parsed usage on success.
+func RerankHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
+	var jinaResp dto.RerankResponse
+	err = json.Unmarshal(responseBody, &jinaResp)
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+
+	jsonResponse, err := json.Marshal(jinaResp)
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+	c.Writer.Header().Set("Content-Type", "application/json")
+	c.Writer.WriteHeader(resp.StatusCode)
+	// The status line is already written, so a body-write failure cannot be
+	// turned into an error response; discard it explicitly instead of leaving
+	// a dead assignment to err.
+	_, _ = c.Writer.Write(jsonResponse)
+	return nil, &jinaResp.Usage
+}

+ 3 - 0
relay/constant/api_type.go

@@ -31,6 +31,7 @@ const (
 	APITypeVolcEngine
 	APITypeBaiduV2
 	APITypeOpenRouter
+	APITypeXinference
 	APITypeDummy // this one is only for count, do not add any channel after this
 )
 
@@ -89,6 +90,8 @@ func ChannelType2APIType(channelType int) (int, bool) {
 		apiType = APITypeBaiduV2
 	case common.ChannelTypeOpenRouter:
 		apiType = APITypeOpenRouter
+	case common.ChannelTypeXinference:
+		apiType = APITypeXinference
 	}
 	if apiType == -1 {
 		return APITypeOpenAI, false

+ 24 - 0
relay/helper/common.go

@@ -19,6 +19,30 @@ func SetEventStreamHeaders(c *gin.Context) {
 	c.Writer.Header().Set("X-Accel-Buffering", "no")
 }
 
+// ClaudeData writes one Claude SSE event (event: <type> / data: <json>) to
+// the client and flushes it immediately; a marshal failure is logged and the
+// event is skipped, while a missing flusher is reported to the caller.
+func ClaudeData(c *gin.Context, resp dto.ClaudeResponse) error {
+	if jsonData, err := json.Marshal(resp); err != nil {
+		common.SysError("error marshalling stream response: " + err.Error())
+	} else {
+		c.Render(-1, common.CustomEvent{Data: fmt.Sprintf("event: %s\n", resp.Type)})
+		c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonData)})
+	}
+	flusher, ok := c.Writer.(http.Flusher)
+	if !ok {
+		return errors.New("streaming error: flusher not found")
+	}
+	flusher.Flush()
+	return nil
+}
+
+// ClaudeChunkData writes a raw, pre-serialized Claude SSE chunk; resp is
+// consulted only for the event type. Flushing is best-effort.
+func ClaudeChunkData(c *gin.Context, resp dto.ClaudeResponse, data string) {
+	c.Render(-1, common.CustomEvent{Data: fmt.Sprintf("event: %s\n", resp.Type)})
+	c.Render(-1, common.CustomEvent{Data: fmt.Sprintf("data: %s\n", data)})
+	flusher, ok := c.Writer.(http.Flusher)
+	if ok {
+		flusher.Flush()
+	}
+}
+
 func StringData(c *gin.Context, str string) error {
 	//str = strings.TrimPrefix(str, "data: ")
 	//str = strings.TrimSuffix(str, "\r")

+ 4 - 0
relay/helper/price.go

@@ -16,6 +16,7 @@ type PriceData struct {
 	CacheRatio             float64
 	GroupRatio             float64
 	UsePrice               bool
+	CacheCreationRatio     float64
 	ShouldPreConsumedQuota int
 }
 
@@ -26,6 +27,7 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
 	var modelRatio float64
 	var completionRatio float64
 	var cacheRatio float64
+	var cacheCreationRatio float64
 	if !usePrice {
 		preConsumedTokens := common.PreConsumedQuota
 		if maxTokens != 0 {
@@ -42,6 +44,7 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
 		}
 		completionRatio = operation_setting.GetCompletionRatio(info.OriginModelName)
 		cacheRatio, _ = operation_setting.GetCacheRatio(info.OriginModelName)
+		cacheCreationRatio, _ = operation_setting.GetCreateCacheRatio(info.OriginModelName)
 		ratio := modelRatio * groupRatio
 		preConsumedQuota = int(float64(preConsumedTokens) * ratio)
 	} else {
@@ -54,6 +57,7 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
 		GroupRatio:             groupRatio,
 		UsePrice:               usePrice,
 		CacheRatio:             cacheRatio,
+		CacheCreationRatio:     cacheCreationRatio,
 		ShouldPreConsumedQuota: preConsumedQuota,
 	}, nil
 }

+ 4 - 1
relay/relay-text.go

@@ -160,7 +160,7 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		}
 		requestBody = bytes.NewBuffer(body)
 	} else {
-		convertedRequest, err := adaptor.ConvertRequest(c, relayInfo, textRequest)
+		convertedRequest, err := adaptor.ConvertOpenAIRequest(c, relayInfo, textRequest)
 		if err != nil {
 			return service.OpenAIErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
 		}
@@ -168,6 +168,9 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		if err != nil {
 			return service.OpenAIErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
 		}
+		if common.DebugEnabled {
+			println("requestBody: ", string(jsonData))
+		}
 		requestBody = bytes.NewBuffer(jsonData)
 	}
 

+ 3 - 4
relay/relay_adaptor.go

@@ -18,7 +18,6 @@ import (
 	"one-api/relay/channel/mokaai"
 	"one-api/relay/channel/ollama"
 	"one-api/relay/channel/openai"
-	"one-api/relay/channel/openrouter"
 	"one-api/relay/channel/palm"
 	"one-api/relay/channel/perplexity"
 	"one-api/relay/channel/siliconflow"
@@ -34,8 +33,6 @@ import (
 
 func GetAdaptor(apiType int) channel.Adaptor {
 	switch apiType {
-	//case constant.APITypeAIProxyLibrary:
-	//	return &aiproxy.Adaptor{}
 	case constant.APITypeAli:
 		return &ali.Adaptor{}
 	case constant.APITypeAnthropic:
@@ -85,7 +82,9 @@ func GetAdaptor(apiType int) channel.Adaptor {
 	case constant.APITypeBaiduV2:
 		return &baidu_v2.Adaptor{}
 	case constant.APITypeOpenRouter:
-		return &openrouter.Adaptor{}
+		return &openai.Adaptor{}
+	case constant.APITypeXinference:
+		return &openai.Adaptor{}
 	}
 	return nil
 }

+ 1 - 0
router/relay-router.go

@@ -35,6 +35,7 @@ func SetRelayRouter(router *gin.Engine) {
 		//http router
 		httpRouter := relayV1Router.Group("")
 		httpRouter.Use(middleware.Distribute())
+		httpRouter.POST("/messages", controller.RelayClaude)
 		httpRouter.POST("/completions", controller.Relay)
 		httpRouter.POST("/chat/completions", controller.Relay)
 		httpRouter.POST("/edits", controller.Relay)

+ 351 - 0
service/convert.go

@@ -0,0 +1,351 @@
+package service
+
+import (
+	"encoding/json"
+	"fmt"
+	"one-api/common"
+	"one-api/dto"
+	relaycommon "one-api/relay/common"
+)
+
+// ClaudeToOpenAIRequest converts a native Claude /v1/messages request into an
+// OpenAI chat-completions request: stop sequences, tool definitions, the
+// system prompt, and every message block (text / image / tool_use /
+// tool_result) are mapped onto their OpenAI equivalents.
+//
+// Returns the converted request, or an error if a message body cannot be
+// parsed.
+func ClaudeToOpenAIRequest(claudeRequest dto.ClaudeRequest) (*dto.GeneralOpenAIRequest, error) {
+	openAIRequest := dto.GeneralOpenAIRequest{
+		Model:       claudeRequest.Model,
+		MaxTokens:   claudeRequest.MaxTokens,
+		Temperature: claudeRequest.Temperature,
+		TopP:        claudeRequest.TopP,
+		Stream:      claudeRequest.Stream,
+	}
+
+	// A single stop sequence is sent as a plain string, several as a list.
+	if len(claudeRequest.StopSequences) == 1 {
+		openAIRequest.Stop = claudeRequest.StopSequences[0]
+	} else if len(claudeRequest.StopSequences) > 1 {
+		openAIRequest.Stop = claudeRequest.StopSequences
+	}
+
+	// Map Claude tool definitions to OpenAI function tools (a decode failure
+	// simply yields an empty tool list).
+	tools, _ := common.Any2Type[[]dto.Tool](claudeRequest.Tools)
+	openAITools := make([]dto.ToolCallRequest, 0)
+	for _, claudeTool := range tools {
+		openAITool := dto.ToolCallRequest{
+			Type: "function",
+			Function: dto.FunctionRequest{
+				Name:        claudeTool.Name,
+				Description: claudeTool.Description,
+				Parameters:  claudeTool.InputSchema,
+			},
+		}
+		openAITools = append(openAITools, openAITool)
+	}
+	openAIRequest.Tools = openAITools
+
+	openAIMessages := make([]dto.Message, 0)
+
+	// The Claude system prompt (plain string or list of blocks) becomes one
+	// leading OpenAI "system" message.
+	if claudeRequest.System != nil {
+		if claudeRequest.IsStringSystem() {
+			openAIMessage := dto.Message{
+				Role: "system",
+			}
+			openAIMessage.SetStringContent(claudeRequest.GetStringSystem())
+			openAIMessages = append(openAIMessages, openAIMessage)
+		} else {
+			systems := claudeRequest.ParseSystem()
+			if len(systems) > 0 {
+				systemStr := ""
+				openAIMessage := dto.Message{
+					Role: "system",
+				}
+				for _, system := range systems {
+					// Concatenate each block's text. The previous code
+					// appended system.Type, producing "texttext..." and
+					// discarding the actual system prompt.
+					systemStr += system.GetText()
+				}
+				openAIMessage.SetStringContent(systemStr)
+				openAIMessages = append(openAIMessages, openAIMessage)
+			}
+		}
+	}
+	for _, claudeMessage := range claudeRequest.Messages {
+		openAIMessage := dto.Message{
+			Role: claudeMessage.Role,
+		}
+
+		if claudeMessage.IsStringContent() {
+			openAIMessage.SetStringContent(claudeMessage.GetStringContent())
+		} else {
+			content, err := claudeMessage.ParseContent()
+			if err != nil {
+				return nil, err
+			}
+			contents := content
+			var toolCalls []dto.ToolCallRequest
+			mediaMessages := make([]dto.MediaContent, 0, len(contents))
+
+			for _, mediaMsg := range contents {
+				switch mediaMsg.Type {
+				case "text":
+					message := dto.MediaContent{
+						Type: "text",
+						Text: mediaMsg.GetText(),
+					}
+					mediaMessages = append(mediaMessages, message)
+				case "image":
+					// Re-encode the base64 image as an OpenAI data URL.
+					imageData := fmt.Sprintf("data:%s;base64,%s", mediaMsg.Source.MediaType, mediaMsg.Source.Data)
+					mediaMessage := dto.MediaContent{
+						Type:     "image_url",
+						ImageUrl: &dto.MessageImageUrl{Url: imageData},
+					}
+					mediaMessages = append(mediaMessages, mediaMessage)
+				case "tool_use":
+					toolCall := dto.ToolCallRequest{
+						ID:   mediaMsg.Id,
+						Type: "function",
+						Function: dto.FunctionRequest{
+							Name:      mediaMsg.Name,
+							Arguments: toJSONString(mediaMsg.Input),
+						},
+					}
+					toolCalls = append(toolCalls, toolCall)
+				case "tool_result":
+					// Tool results become separate "tool"-role messages.
+					oaiToolMessage := dto.Message{
+						Role:       "tool",
+						Name:       &mediaMsg.Name,
+						ToolCallId: mediaMsg.ToolUseId,
+					}
+					if mediaMsg.IsStringContent() {
+						oaiToolMessage.SetStringContent(mediaMsg.GetStringContent())
+					} else {
+						mediaContents := mediaMsg.ParseMediaContent()
+						if len(mediaContents) > 0 && mediaContents[0].Text != nil {
+							oaiToolMessage.SetStringContent(*mediaContents[0].Text)
+						}
+					}
+					openAIMessages = append(openAIMessages, oaiToolMessage)
+				}
+			}
+
+			if len(mediaMessages) > 0 {
+				openAIMessage.SetMediaContent(mediaMessages)
+			}
+
+			if len(toolCalls) > 0 {
+				openAIMessage.SetToolCalls(toolCalls)
+			}
+		}
+		// NOTE(review): an assistant message whose only payload is tool calls
+		// may parse to empty content and be dropped by this gate — confirm
+		// that is intended.
+		if len(openAIMessage.ParseContent()) > 0 {
+			openAIMessages = append(openAIMessages, openAIMessage)
+		}
+	}
+
+	openAIRequest.Messages = openAIMessages
+
+	return &openAIRequest, nil
+}
+
+// OpenAIErrorToClaudeError maps an OpenAI-style relay error onto the Claude
+// error envelope, preserving the HTTP status code.
+func OpenAIErrorToClaudeError(openAIError *dto.OpenAIErrorWithStatusCode) *dto.ClaudeErrorWithStatusCode {
+	return &dto.ClaudeErrorWithStatusCode{
+		Error: dto.ClaudeError{
+			Type:    "new_api_error",
+			Message: openAIError.Error.Message,
+		},
+		StatusCode: openAIError.StatusCode,
+	}
+}
+
+// ClaudeErrorToOpenAIError maps a Claude error envelope back onto the
+// OpenAI-style error, preserving the HTTP status code.
+func ClaudeErrorToOpenAIError(claudeError *dto.ClaudeErrorWithStatusCode) *dto.OpenAIErrorWithStatusCode {
+	return &dto.OpenAIErrorWithStatusCode{
+		Error: dto.OpenAIError{
+			Message: claudeError.Error.Message,
+			Type:    "new_api_error",
+		},
+		StatusCode: claudeError.StatusCode,
+	}
+}
+
+// generateStopBlock builds a content_block_stop event for the given block index.
+func generateStopBlock(index int) *dto.ClaudeResponse {
+	stop := dto.ClaudeResponse{Type: "content_block_stop"}
+	stop.Index = common.GetPointer[int](index)
+	return &stop
+}
+
+// StreamResponseOpenAI2Claude converts one OpenAI streaming chunk into the
+// Claude SSE events it implies. The first chunk (SendResponseCount == 1)
+// yields message_start plus an initial content_block_start; a chunk carrying
+// a finish_reason closes the current block and the message; every other
+// chunk becomes a content_block_delta (text_delta or input_json_delta).
+// Cross-chunk state (current block index, last delta kind) lives in
+// info.ClaudeConvertInfo.
+func StreamResponseOpenAI2Claude(openAIResponse *dto.ChatCompletionsStreamResponse, info *relaycommon.RelayInfo) []*dto.ClaudeResponse {
+	var claudeResponses []*dto.ClaudeResponse
+	if info.SendResponseCount == 1 {
+		msg := &dto.ClaudeMediaMessage{
+			Id:    openAIResponse.Id,
+			Model: openAIResponse.Model,
+			Type:  "message",
+			Role:  "assistant",
+			Usage: &dto.ClaudeUsage{
+				InputTokens:  info.PromptTokens,
+				OutputTokens: 0,
+			},
+		}
+		msg.SetContent(make([]any, 0))
+		claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
+			Type:    "message_start",
+			Message: msg,
+		})
+		// (a no-op `claudeResponses = append(claudeResponses)` was removed here)
+		if openAIResponse.IsToolCall() {
+			resp := &dto.ClaudeResponse{
+				Type: "content_block_start",
+				ContentBlock: &dto.ClaudeMediaMessage{
+					Id:   openAIResponse.GetFirstToolCall().ID,
+					Type: "tool_use",
+					Name: openAIResponse.GetFirstToolCall().Function.Name,
+				},
+			}
+			resp.SetIndex(0)
+			claudeResponses = append(claudeResponses, resp)
+		} else {
+			resp := &dto.ClaudeResponse{
+				Type: "content_block_start",
+				ContentBlock: &dto.ClaudeMediaMessage{
+					Type: "text",
+					Text: common.GetPointer[string](""),
+				},
+			}
+			resp.SetIndex(0)
+			claudeResponses = append(claudeResponses, resp)
+		}
+		return claudeResponses
+	}
+
+	if len(openAIResponse.Choices) == 0 {
+		// Nothing to convert (e.g. a chunk without choices).
+		// TODO: decide whether usage-only chunks need a message_delta.
+		return claudeResponses
+	} else {
+		chosenChoice := openAIResponse.Choices[0]
+		if chosenChoice.FinishReason != nil && *chosenChoice.FinishReason != "" {
+			// Stream finished: close the open block, emit the final
+			// message_delta (with usage, when the chunk carries it), then stop.
+			claudeResponses = append(claudeResponses, generateStopBlock(info.ClaudeConvertInfo.Index))
+			if openAIResponse.Usage != nil {
+				claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
+					Type: "message_delta",
+					Usage: &dto.ClaudeUsage{
+						InputTokens:  openAIResponse.Usage.PromptTokens,
+						OutputTokens: openAIResponse.Usage.CompletionTokens,
+					},
+					Delta: &dto.ClaudeMediaMessage{
+						StopReason: common.GetPointer[string](stopReasonOpenAI2Claude(*chosenChoice.FinishReason)),
+					},
+				})
+			}
+			// NOTE(review): when Usage is nil no stop_reason is emitted at
+			// all — confirm clients tolerate message_stop without a
+			// preceding message_delta.
+			claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
+				Type: "message_stop",
+			})
+		} else {
+			var claudeResponse dto.ClaudeResponse
+			claudeResponse.SetIndex(0)
+			claudeResponse.Type = "content_block_delta"
+			if len(chosenChoice.Delta.ToolCalls) > 0 {
+				// Switching from text to tool output opens a new tool_use
+				// block at the next index.
+				if info.ClaudeConvertInfo.LastMessagesType == relaycommon.LastMessageTypeText {
+					claudeResponses = append(claudeResponses, generateStopBlock(info.ClaudeConvertInfo.Index))
+					info.ClaudeConvertInfo.Index++
+					claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
+						Index: &info.ClaudeConvertInfo.Index,
+						Type:  "content_block_start",
+						ContentBlock: &dto.ClaudeMediaMessage{
+							Id:    openAIResponse.GetFirstToolCall().ID,
+							Type:  "tool_use",
+							Name:  openAIResponse.GetFirstToolCall().Function.Name,
+							Input: map[string]interface{}{},
+						},
+					})
+				}
+				info.ClaudeConvertInfo.LastMessagesType = relaycommon.LastMessageTypeTools
+				// Stream the tool arguments as an input_json_delta.
+				claudeResponse.Delta = &dto.ClaudeMediaMessage{
+					Type:        "input_json_delta",
+					PartialJson: &chosenChoice.Delta.ToolCalls[0].Function.Arguments,
+				}
+			} else {
+				info.ClaudeConvertInfo.LastMessagesType = relaycommon.LastMessageTypeText
+				// Plain text chunk.
+				claudeResponse.Delta = &dto.ClaudeMediaMessage{
+					Type: "text_delta",
+					Text: common.GetPointer[string](chosenChoice.Delta.GetContentString()),
+				}
+			}
+			claudeResponse.Index = &info.ClaudeConvertInfo.Index
+			claudeResponses = append(claudeResponses, &claudeResponse)
+		}
+	}
+
+	return claudeResponses
+}
+
+// ResponseOpenAI2Claude converts a non-streaming OpenAI completion into a
+// Claude message response. Each choice becomes one content block; the stop
+// reason of the last choice wins.
+func ResponseOpenAI2Claude(openAIResponse *dto.OpenAITextResponse, info *relaycommon.RelayInfo) *dto.ClaudeResponse {
+	var stopReason string
+	contents := make([]dto.ClaudeMediaMessage, 0)
+	claudeResponse := &dto.ClaudeResponse{
+		Id:    openAIResponse.Id,
+		Type:  "message",
+		Role:  "assistant",
+		Model: openAIResponse.Model,
+	}
+	for _, choice := range openAIResponse.Choices {
+		stopReason = stopReasonOpenAI2Claude(choice.FinishReason)
+		claudeContent := dto.ClaudeMediaMessage{}
+		if choice.FinishReason == "tool_calls" {
+			// Parse the tool calls once (the old code re-parsed three times
+			// per choice).
+			toolCalls := choice.Message.ParseToolCalls()
+			claudeContent.Type = "tool_use"
+			// NOTE(review): the tool_use id comes from Message.ToolCallId;
+			// confirm it should not be toolCalls[0].ID.
+			claudeContent.Id = choice.Message.ToolCallId
+			claudeContent.Name = toolCalls[0].Function.Name
+			var mapParams map[string]interface{}
+			if err := json.Unmarshal([]byte(toolCalls[0].Function.Arguments), &mapParams); err == nil {
+				claudeContent.Input = mapParams
+			} else {
+				// Arguments were not valid JSON; pass the raw string through.
+				claudeContent.Input = toolCalls[0].Function.Arguments
+			}
+		} else {
+			claudeContent.Type = "text"
+			claudeContent.SetText(choice.Message.StringContent())
+		}
+		contents = append(contents, claudeContent)
+	}
+	claudeResponse.Content = contents
+	claudeResponse.StopReason = stopReason
+	claudeResponse.Usage = &dto.ClaudeUsage{
+		InputTokens:  openAIResponse.PromptTokens,
+		OutputTokens: openAIResponse.CompletionTokens,
+	}
+
+	return claudeResponse
+}
+
+// stopReasonOpenAI2Claude translates an OpenAI finish_reason into the
+// equivalent Claude stop_reason; unknown reasons pass through unchanged.
+func stopReasonOpenAI2Claude(reason string) string {
+	mapped := map[string]string{
+		"stop":          "end_turn",
+		"stop_sequence": "stop_sequence",
+		"max_tokens":    "max_tokens",
+		"tool_calls":    "tool_use",
+	}
+	if out, ok := mapped[reason]; ok {
+		return out
+	}
+	return reason
+}
+
+// toJSONString serializes v to JSON, falling back to "{}" when v cannot be
+// marshalled.
+func toJSONString(v interface{}) string {
+	if b, err := json.Marshal(v); err == nil {
+		return string(b)
+	}
+	return "{}"
+}

+ 24 - 0
service/error.go

@@ -50,6 +50,30 @@ func OpenAIErrorWrapperLocal(err error, code string, statusCode int) *dto.OpenAI
 	return openaiErr
 }
 
+// ClaudeErrorWrapper turns an internal error into a Claude error envelope.
+// Transport-level failures (messages containing post/dial/http) are logged
+// and masked with a generic message so upstream addresses never leak to
+// clients. The code parameter is currently unused, kept for parity with
+// OpenAIErrorWrapper.
+func ClaudeErrorWrapper(err error, code string, statusCode int) *dto.ClaudeErrorWithStatusCode {
+	text := err.Error()
+	lowerText := strings.ToLower(text)
+	if strings.Contains(lowerText, "post") || strings.Contains(lowerText, "dial") || strings.Contains(lowerText, "http") {
+		common.SysLog(fmt.Sprintf("error: %s", text))
+		text = "请求上游地址失败"
+	}
+	return &dto.ClaudeErrorWithStatusCode{
+		Error: dto.ClaudeError{
+			Message: text,
+			Type:    "new_api_error",
+		},
+		StatusCode: statusCode,
+	}
+}
+
+// ClaudeErrorWrapperLocal wraps err like ClaudeErrorWrapper but marks the
+// result as a local (non-upstream) error.
+func ClaudeErrorWrapperLocal(err error, code string, statusCode int) *dto.ClaudeErrorWithStatusCode {
+	wrapped := ClaudeErrorWrapper(err, code, statusCode)
+	wrapped.LocalError = true
+	return wrapped
+}
+
 func RelayErrorHandler(resp *http.Response, showBodyWhenFail bool) (errWithStatusCode *dto.OpenAIErrorWithStatusCode) {
 	errWithStatusCode = &dto.OpenAIErrorWithStatusCode{
 		StatusCode: resp.StatusCode,

+ 9 - 0
service/log_info_generate.go

@@ -53,3 +53,12 @@ func GenerateAudioOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 	info["audio_completion_ratio"] = audioCompletionRatio
 	return info
 }
+
+// GenerateClaudeOtherInfo builds the "other" metadata map for a Claude
+// consume-log entry: the standard text info plus the prompt-cache creation
+// token count and its price ratio, flagged with "claude": true.
+func GenerateClaudeOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, modelRatio, groupRatio, completionRatio float64,
+	cacheTokens int, cacheRatio float64, cacheCreationTokens int, cacheCreationRatio float64, modelPrice float64) map[string]interface{} {
+	info := GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, cacheTokens, cacheRatio, modelPrice)
+	info["claude"] = true
+	info["cache_creation_tokens"] = cacheCreationTokens
+	info["cache_creation_ratio"] = cacheCreationRatio
+	return info
+}

+ 69 - 0
service/quota.go

@@ -194,6 +194,75 @@ func PostWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, mod
 		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
 }
 
+// PostClaudeConsumeQuota settles the final bill for a native Claude relay:
+// it prices prompt/completion/cache tokens (or a fixed per-call price),
+// applies the delta against the pre-consumed quota, updates user/channel
+// counters, and writes the consume log. extraContent is currently unused.
+func PostClaudeConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
+	usage *dto.Usage, preConsumedQuota int, userQuota int, priceData helper.PriceData, extraContent string) {
+
+	useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
+	promptTokens := usage.PromptTokens
+	completionTokens := usage.CompletionTokens
+	modelName := relayInfo.OriginModelName
+
+	tokenName := ctx.GetString("token_name")
+	completionRatio := priceData.CompletionRatio
+	modelRatio := priceData.ModelRatio
+	groupRatio := priceData.GroupRatio
+	modelPrice := priceData.ModelPrice
+
+	cacheRatio := priceData.CacheRatio
+	cacheTokens := usage.PromptTokensDetails.CachedTokens
+
+	cacheCreationRatio := priceData.CacheCreationRatio
+	cacheCreationTokens := usage.PromptTokensDetails.CachedCreationTokens
+
+	calculateQuota := 0.0
+	if !priceData.UsePrice {
+		// Token-based pricing: cached and cache-creation tokens carry their
+		// own ratios on top of the plain prompt tokens.
+		calculateQuota = float64(promptTokens)
+		calculateQuota += float64(cacheTokens) * cacheRatio
+		calculateQuota += float64(cacheCreationTokens) * cacheCreationRatio
+		calculateQuota += float64(completionTokens) * completionRatio
+		calculateQuota = calculateQuota * groupRatio * modelRatio
+	} else {
+		// Fixed per-call pricing.
+		calculateQuota = modelPrice * common.QuotaPerUnit * groupRatio
+	}
+
+	// Never let a priced model round down to a free call.
+	if modelRatio != 0 && calculateQuota <= 0 {
+		calculateQuota = 1
+	}
+
+	quota := int(calculateQuota)
+
+	totalTokens := promptTokens + completionTokens
+
+	var logContent string
+	// Record a consume log even when the quota ends up being 0.
+	if totalTokens == 0 {
+		// Zero tokens almost certainly means the upstream failed; charge
+		// nothing, but log so the pre-consumed quota can be traced.
+		quota = 0
+		logContent += "(可能是上游出错)"
+		common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, "+
+			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, modelName, preConsumedQuota))
+	} else {
+		// Only the difference against the pre-consumed amount is applied.
+		quotaDelta := quota - preConsumedQuota
+		if quotaDelta != 0 {
+			err := PostConsumeQuota(relayInfo, quotaDelta, preConsumedQuota, true)
+			if err != nil {
+				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
+			}
+		}
+		model.UpdateUserUsedQuotaAndRequestCount(relayInfo.UserId, quota)
+		model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
+	}
+
+	other := GenerateClaudeOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio,
+		cacheTokens, cacheRatio, cacheCreationTokens, cacheCreationRatio, modelPrice)
+	model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, modelName,
+		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
+}
+
 func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 	usage *dto.Usage, preConsumedQuota int, userQuota int, priceData helper.PriceData, extraContent string) {
 

+ 105 - 0
service/token_counter.go

@@ -1,6 +1,7 @@
 package service
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"image"
@@ -192,6 +193,110 @@ func CountTokenChatRequest(info *relaycommon.RelayInfo, request dto.GeneralOpenA
 	return tkm, nil
 }
 
+// CountTokenClaudeRequest estimates the prompt tokens of a native Claude
+// request: messages, the system prompt, and any tool definitions.
+//
+// Returns the estimate, or an error when the request is malformed.
+func CountTokenClaudeRequest(request dto.ClaudeRequest, model string) (int, error) {
+	tkm := 0
+
+	// Count tokens in messages
+	msgTokens, err := CountTokenClaudeMessages(request.Messages, model, request.Stream)
+	if err != nil {
+		return 0, err
+	}
+	tkm += msgTokens
+
+	// Count tokens in system message
+	if request.System != "" {
+		systemTokens, err := CountTokenInput(request.System, model)
+		if err != nil {
+			return 0, err
+		}
+		tkm += systemTokens
+	}
+
+	if request.Tools != nil {
+		// Tools must decode as a list before being counted.
+		if tools, ok := request.Tools.([]any); ok {
+			if len(tools) > 0 {
+				parsedTools, err1 := common.Any2Type[[]dto.Tool](request.Tools)
+				if err1 != nil {
+					// Report err1 (the old code interpolated the stale, nil
+					// `err` from the message count above).
+					return 0, fmt.Errorf("tools: Input should be a valid list: %v", err1)
+				}
+				toolTokens, err2 := CountTokenClaudeTools(parsedTools, model)
+				if err2 != nil {
+					return 0, fmt.Errorf("tools: %v", err2)
+				}
+				tkm += toolTokens
+			}
+		} else {
+			return 0, errors.New("tools: Input should be a valid list")
+		}
+	}
+
+	return tkm, nil
+}
+
+// CountTokenClaudeMessages estimates prompt tokens for a Claude message list
+// using the tiktoken encoder chosen for model. The stream flag is accepted
+// for parity with the (disabled) per-image token helper but is currently
+// unused.
+func CountTokenClaudeMessages(messages []dto.ClaudeMessage, model string, stream bool) (int, error) {
+	tokenEncoder := getTokenEncoder(model)
+	tokenNum := 0
+
+	for _, message := range messages {
+		// Count tokens for role
+		tokenNum += getTokenNum(tokenEncoder, message.Role)
+		if message.IsStringContent() {
+			tokenNum += getTokenNum(tokenEncoder, message.GetStringContent())
+		} else {
+			// Structured content: count each block by its type.
+			content, err := message.ParseContent()
+			if err != nil {
+				return 0, err
+			}
+			for _, mediaMessage := range content {
+				switch mediaMessage.Type {
+				case "text":
+					tokenNum += getTokenNum(tokenEncoder, mediaMessage.GetText())
+				case "image":
+					//imageTokenNum, err := getClaudeImageToken(mediaMsg.Source, model, stream)
+					//if err != nil {
+					//	return 0, err
+					//}
+					// Flat per-image estimate until the exact formula above
+					// is enabled.
+					tokenNum += 1000
+				case "tool_use":
+					// Tool name plus the JSON-encoded input payload.
+					tokenNum += getTokenNum(tokenEncoder, mediaMessage.Name)
+					inputJSON, _ := json.Marshal(mediaMessage.Input)
+					tokenNum += getTokenNum(tokenEncoder, string(inputJSON))
+				case "tool_result":
+					contentJSON, _ := json.Marshal(mediaMessage.Content)
+					tokenNum += getTokenNum(tokenEncoder, string(contentJSON))
+				}
+			}
+		}
+	}
+
+	// Add a constant for message formatting (this may need adjustment based on Claude's exact formatting)
+	tokenNum += len(messages) * 2 // Assuming 2 tokens per message for formatting
+
+	return tokenNum, nil
+}
+
+// CountTokenClaudeTools estimates the token footprint of Claude tool
+// definitions: name, description, and the JSON-encoded input schema, plus a
+// small per-tool formatting constant.
+func CountTokenClaudeTools(tools []dto.Tool, model string) (int, error) {
+	tokenEncoder := getTokenEncoder(model)
+	tokenNum := 0
+
+	for _, tool := range tools {
+		tokenNum += getTokenNum(tokenEncoder, tool.Name)
+		tokenNum += getTokenNum(tokenEncoder, tool.Description)
+
+		schemaJSON, err := json.Marshal(tool.InputSchema)
+		if err != nil {
+			// fmt.Errorf replaces the old errors.New(fmt.Sprintf(...)).
+			return 0, fmt.Errorf("marshal_tool_schema_fail: %s", err.Error())
+		}
+		tokenNum += getTokenNum(tokenEncoder, string(schemaJSON))
+	}
+
+	// Add a constant for tool formatting (this may need adjustment based on Claude's exact formatting)
+	tokenNum += len(tools) * 3 // Assuming 3 tokens per tool for formatting
+
+	return tokenNum, nil
+}
+
 func CountTokenRealtime(info *relaycommon.RelayInfo, request dto.RealtimeEvent, model string) (int, int, error) {
 	audioToken := 0
 	textToken := 0

+ 42 - 29
setting/operation_setting/cache_ratio.go

@@ -7,26 +7,45 @@ import (
 )
 
 var defaultCacheRatio = map[string]float64{
-	"gpt-4":                        0.5,
-	"o1":                           0.5,
-	"o1-2024-12-17":                0.5,
-	"o1-preview-2024-09-12":        0.5,
-	"o1-preview":                   0.5,
-	"o1-mini-2024-09-12":           0.5,
-	"o1-mini":                      0.5,
-	"gpt-4o-2024-11-20":            0.5,
-	"gpt-4o-2024-08-06":            0.5,
-	"gpt-4o":                       0.5,
-	"gpt-4o-mini-2024-07-18":       0.5,
-	"gpt-4o-mini":                  0.5,
-	"gpt-4o-realtime-preview":      0.5,
-	"gpt-4o-mini-realtime-preview": 0.5,
-	"deepseek-chat":                0.25,
-	"deepseek-reasoner":            0.25,
-	"deepseek-coder":               0.25,
+	"gpt-4":                               0.5,
+	"o1":                                  0.5,
+	"o1-2024-12-17":                       0.5,
+	"o1-preview-2024-09-12":               0.5,
+	"o1-preview":                          0.5,
+	"o1-mini-2024-09-12":                  0.5,
+	"o1-mini":                             0.5,
+	"gpt-4o-2024-11-20":                   0.5,
+	"gpt-4o-2024-08-06":                   0.5,
+	"gpt-4o":                              0.5,
+	"gpt-4o-mini-2024-07-18":              0.5,
+	"gpt-4o-mini":                         0.5,
+	"gpt-4o-realtime-preview":             0.5,
+	"gpt-4o-mini-realtime-preview":        0.5,
+	"deepseek-chat":                       0.25,
+	"deepseek-reasoner":                   0.25,
+	"deepseek-coder":                      0.25,
+	"claude-3-sonnet-20240229":            0.1,
+	"claude-3-opus-20240229":              0.1,
+	"claude-3-haiku-20240307":             0.1,
+	"claude-3-5-haiku-20241022":           0.1,
+	"claude-3-5-sonnet-20240620":          0.1,
+	"claude-3-5-sonnet-20241022":          0.1,
+	"claude-3-7-sonnet-20250219":          0.1,
+	"claude-3-7-sonnet-20250219-thinking": 0.1,
 }
 
-var defaultCreateCacheRatio = map[string]float64{}
+var defaultCreateCacheRatio = map[string]float64{
+	"claude-3-sonnet-20240229":            1.25,
+	"claude-3-opus-20240229":              1.25,
+	"claude-3-haiku-20240307":             1.25,
+	"claude-3-5-haiku-20241022":           1.25,
+	"claude-3-5-sonnet-20240620":          1.25,
+	"claude-3-5-sonnet-20241022":          1.25,
+	"claude-3-7-sonnet-20250219":          1.25,
+	"claude-3-7-sonnet-20250219-thinking": 1.25,
+}
+
+//var defaultCreateCacheRatio = map[string]float64{}
 
 var cacheRatioMap map[string]float64
 var cacheRatioMapMutex sync.RWMutex
@@ -69,16 +88,10 @@ func GetCacheRatio(name string) (float64, bool) {
 	return ratio, true
 }
 
-// DefaultCacheRatio2JSONString converts the default cache ratio map to a JSON string
-func DefaultCacheRatio2JSONString() string {
-	jsonBytes, err := json.Marshal(defaultCacheRatio)
-	if err != nil {
-		common.SysError("error marshalling default cache ratio: " + err.Error())
+func GetCreateCacheRatio(name string) (float64, bool) {
+	ratio, ok := defaultCreateCacheRatio[name]
+	if !ok {
+		return 1.25, false // Default to 1.25 if not found
 	}
-	return string(jsonBytes)
-}
-
-// GetDefaultCacheRatioMap returns the default cache ratio map
-func GetDefaultCacheRatioMap() map[string]float64 {
-	return defaultCacheRatio
+	return ratio, true
 }

+ 66 - 13
web/src/components/LogsTable.js

@@ -26,8 +26,14 @@ import {
 } from '@douyinfe/semi-ui';
 import { ITEMS_PER_PAGE } from '../constants';
 import {
-  renderAudioModelPrice, renderGroup,
-  renderModelPrice, renderModelPriceSimple,
+  renderAudioModelPrice,
+  renderClaudeLogContent,
+  renderClaudeModelPrice,
+  renderClaudeModelPriceSimple,
+  renderGroup,
+  renderLogContent,
+  renderModelPrice,
+  renderModelPriceSimple,
   renderNumber,
   renderQuota,
   stringToColor
@@ -564,13 +570,23 @@ const LogsTable = () => {
           );
         }
 
-        let content = renderModelPriceSimple(
-          other.model_ratio,
-          other.model_price,
-          other.group_ratio,
-          other.cache_tokens || 0,
-          other.cache_ratio || 1.0,
-        );
+        let content = other?.claude
+          ? renderClaudeModelPriceSimple(
+            other.model_ratio,
+            other.model_price,
+            other.group_ratio,
+            other.cache_tokens || 0,
+            other.cache_ratio || 1.0,
+            other.cache_creation_tokens || 0,
+            other.cache_creation_ratio || 1.0,
+          )
+          : renderModelPriceSimple(
+            other.model_ratio,
+            other.model_price,
+            other.group_ratio,
+            other.cache_tokens || 0,
+            other.cache_ratio || 1.0,
+          );
         return (
             <Paragraph
                 ellipsis={{
@@ -818,10 +834,34 @@ const LogsTable = () => {
           value: other.cache_tokens,
         });
       }
-      expandDataLocal.push({
-        key: t('日志详情'),
-        value: logs[i].content,
-      });
+      if (other?.cache_creation_tokens > 0) {
+        expandDataLocal.push({
+          key: t('缓存创建 Tokens'),
+          value: other.cache_creation_tokens,
+        });
+      }
+      if (logs[i].type === 2) {
+        expandDataLocal.push({
+          key: t('日志详情'),
+          value: other?.claude
+            ? renderClaudeLogContent(
+              other?.model_ratio,
+              other.completion_ratio,
+              other.model_price,
+              other.group_ratio,
+              other.user_group_ratio,
+              other.cache_ratio || 1.0,
+              other.cache_creation_ratio || 1.0
+            )
+            : renderLogContent(
+              other?.model_ratio,
+              other.completion_ratio,
+              other.model_price,
+              other.group_ratio,
+              other.user_group_ratio
+            ),
+        });
+      }
       if (logs[i].type === 2) {
         let modelMapped = other?.is_model_mapped && other?.upstream_model_name && other?.upstream_model_name !== '';
         if (modelMapped) {
@@ -850,6 +890,19 @@ const LogsTable = () => {
             other?.cache_tokens || 0,
             other?.cache_ratio || 1.0,
           );
+        } else if (other?.claude) {
+          content = renderClaudeModelPrice(
+            logs[i].prompt_tokens,
+            logs[i].completion_tokens,
+            other.model_ratio,
+            other.model_price,
+            other.completion_ratio,
+            other.group_ratio,
+            other.cache_tokens || 0,
+            other.cache_ratio || 1.0,
+            other.cache_creation_tokens || 0,
+            other.cache_creation_ratio || 1.0,
+          );
         } else {
           content = renderModelPrice(
             logs[i].prompt_tokens,

+ 0 - 790
web/src/components/SafetySetting.js

@@ -1,790 +0,0 @@
-import React, { useEffect, useState } from 'react';
-import {
-  Button,
-  Divider,
-  Form,
-  Grid,
-  Header,
-  Message,
-  Modal,
-} from 'semantic-ui-react';
-import { API, removeTrailingSlash, showError, verifyJSON } from '../helpers';
-
-import { useTheme } from '../context/Theme';
-
-const SafetySetting = () => {
-  let [inputs, setInputs] = useState({
-    PasswordLoginEnabled: '',
-    PasswordRegisterEnabled: '',
-    EmailVerificationEnabled: '',
-    GitHubOAuthEnabled: '',
-    GitHubClientId: '',
-    GitHubClientSecret: '',
-    Notice: '',
-    SMTPServer: '',
-    SMTPPort: '',
-    SMTPAccount: '',
-    SMTPFrom: '',
-    SMTPToken: '',
-    ServerAddress: '',
-    WorkerUrl: '',
-    WorkerValidKey: '',
-    EpayId: '',
-    EpayKey: '',
-    Price: 7.3,
-    MinTopUp: 1,
-    TopupGroupRatio: '',
-    PayAddress: '',
-    CustomCallbackAddress: '',
-    Footer: '',
-    WeChatAuthEnabled: '',
-    WeChatServerAddress: '',
-    WeChatServerToken: '',
-    WeChatAccountQRCodeImageURL: '',
-    TurnstileCheckEnabled: '',
-    TurnstileSiteKey: '',
-    TurnstileSecretKey: '',
-    RegisterEnabled: '',
-    EmailDomainRestrictionEnabled: '',
-    EmailAliasRestrictionEnabled: '',
-    SMTPSSLEnabled: '',
-    EmailDomainWhitelist: [],
-    // telegram login
-    TelegramOAuthEnabled: '',
-    TelegramBotToken: '',
-    TelegramBotName: '',
-  });
-  const [originInputs, setOriginInputs] = useState({});
-  let [loading, setLoading] = useState(false);
-  const [EmailDomainWhitelist, setEmailDomainWhitelist] = useState([]);
-  const [restrictedDomainInput, setRestrictedDomainInput] = useState('');
-  const [showPasswordWarningModal, setShowPasswordWarningModal] =
-    useState(false);
-
-  const theme = useTheme();
-  const isDark = theme === 'dark';
-
-  const getOptions = async () => {
-    const res = await API.get('/api/option/');
-    const { success, message, data } = res.data;
-    if (success) {
-      let newInputs = {};
-      data.forEach((item) => {
-        if (item.key === 'TopupGroupRatio') {
-          item.value = JSON.stringify(JSON.parse(item.value), null, 2);
-        }
-        newInputs[item.key] = item.value;
-      });
-      setInputs({
-        ...newInputs,
-        EmailDomainWhitelist: newInputs.EmailDomainWhitelist.split(','),
-      });
-      setOriginInputs(newInputs);
-
-      setEmailDomainWhitelist(
-        newInputs.EmailDomainWhitelist.split(',').map((item) => {
-          return { key: item, text: item, value: item };
-        }),
-      );
-    } else {
-      showError(message);
-    }
-  };
-
-  useEffect(() => {
-    getOptions().then();
-  }, []);
-  useEffect(() => {}, [inputs.EmailDomainWhitelist]);
-
-  const updateOption = async (key, value) => {
-    setLoading(true);
-    switch (key) {
-      case 'PasswordLoginEnabled':
-      case 'PasswordRegisterEnabled':
-      case 'EmailVerificationEnabled':
-      case 'GitHubOAuthEnabled':
-      case 'WeChatAuthEnabled':
-      case 'TelegramOAuthEnabled':
-      case 'TurnstileCheckEnabled':
-      case 'EmailDomainRestrictionEnabled':
-      case 'EmailAliasRestrictionEnabled':
-      case 'SMTPSSLEnabled':
-      case 'RegisterEnabled':
-        value = inputs[key] === 'true' ? 'false' : 'true';
-        break;
-      default:
-        break;
-    }
-    const res = await API.put('/api/option/', {
-      key,
-      value,
-    });
-    const { success, message } = res.data;
-    if (success) {
-      if (key === 'EmailDomainWhitelist') {
-        value = value.split(',');
-      }
-      if (key === 'Price') {
-        value = parseFloat(value);
-      }
-      setInputs((inputs) => ({
-        ...inputs,
-        [key]: value,
-      }));
-    } else {
-      showError(message);
-    }
-    setLoading(false);
-  };
-
-  const handleInputChange = async (e, { name, value }) => {
-    if (name === 'PasswordLoginEnabled' && inputs[name] === 'true') {
-      // block disabling password login
-      setShowPasswordWarningModal(true);
-      return;
-    }
-    if (
-      name === 'Notice' ||
-      (name.startsWith('SMTP') && name !== 'SMTPSSLEnabled') ||
-      name === 'ServerAddress' ||
-      name === 'WorkerUrl' ||
-      name === 'WorkerValidKey' ||
-      name === 'EpayId' ||
-      name === 'EpayKey' ||
-      name === 'Price' ||
-      name === 'PayAddress' ||
-      name === 'GitHubClientId' ||
-      name === 'GitHubClientSecret' ||
-      name === 'WeChatServerAddress' ||
-      name === 'WeChatServerToken' ||
-      name === 'WeChatAccountQRCodeImageURL' ||
-      name === 'TurnstileSiteKey' ||
-      name === 'TurnstileSecretKey' ||
-      name === 'EmailDomainWhitelist' ||
-      name === 'TopupGroupRatio' ||
-      name === 'TelegramBotToken' ||
-      name === 'TelegramBotName'
-    ) {
-      setInputs((inputs) => ({ ...inputs, [name]: value }));
-    } else {
-      await updateOption(name, value);
-    }
-  };
-
-  const submitServerAddress = async () => {
-    let ServerAddress = removeTrailingSlash(inputs.ServerAddress);
-    await updateOption('ServerAddress', ServerAddress);
-  };
-
-  const submitWorker = async () => {
-    let WorkerUrl = removeTrailingSlash(inputs.WorkerUrl);
-    await updateOption('WorkerUrl', WorkerUrl);
-    if (inputs.WorkerValidKey !== '') {
-      await updateOption('WorkerValidKey', inputs.WorkerValidKey);
-    }
-  }
-
-  const submitPayAddress = async () => {
-    if (inputs.ServerAddress === '') {
-      showError('请先填写服务器地址');
-      return;
-    }
-    if (originInputs['TopupGroupRatio'] !== inputs.TopupGroupRatio) {
-      if (!verifyJSON(inputs.TopupGroupRatio)) {
-        showError('充值分组倍率不是合法的 JSON 字符串');
-        return;
-      }
-      await updateOption('TopupGroupRatio', inputs.TopupGroupRatio);
-    }
-    let PayAddress = removeTrailingSlash(inputs.PayAddress);
-    await updateOption('PayAddress', PayAddress);
-    if (inputs.EpayId !== '') {
-      await updateOption('EpayId', inputs.EpayId);
-    }
-    if (inputs.EpayKey !== undefined && inputs.EpayKey !== '') {
-      await updateOption('EpayKey', inputs.EpayKey);
-    }
-    await updateOption('Price', '' + inputs.Price);
-  };
-
-  const submitSMTP = async () => {
-    if (originInputs['SMTPServer'] !== inputs.SMTPServer) {
-      await updateOption('SMTPServer', inputs.SMTPServer);
-    }
-    if (originInputs['SMTPAccount'] !== inputs.SMTPAccount) {
-      await updateOption('SMTPAccount', inputs.SMTPAccount);
-    }
-    if (originInputs['SMTPFrom'] !== inputs.SMTPFrom) {
-      await updateOption('SMTPFrom', inputs.SMTPFrom);
-    }
-    if (
-      originInputs['SMTPPort'] !== inputs.SMTPPort &&
-      inputs.SMTPPort !== ''
-    ) {
-      await updateOption('SMTPPort', inputs.SMTPPort);
-    }
-    if (
-      originInputs['SMTPToken'] !== inputs.SMTPToken &&
-      inputs.SMTPToken !== ''
-    ) {
-      await updateOption('SMTPToken', inputs.SMTPToken);
-    }
-  };
-
-  const submitEmailDomainWhitelist = async () => {
-    if (
-      originInputs['EmailDomainWhitelist'] !==
-        inputs.EmailDomainWhitelist.join(',') &&
-      inputs.SMTPToken !== ''
-    ) {
-      await updateOption(
-        'EmailDomainWhitelist',
-        inputs.EmailDomainWhitelist.join(','),
-      );
-    }
-  };
-
-  const submitWeChat = async () => {
-    if (originInputs['WeChatServerAddress'] !== inputs.WeChatServerAddress) {
-      await updateOption(
-        'WeChatServerAddress',
-        removeTrailingSlash(inputs.WeChatServerAddress),
-      );
-    }
-    if (
-      originInputs['WeChatAccountQRCodeImageURL'] !==
-      inputs.WeChatAccountQRCodeImageURL
-    ) {
-      await updateOption(
-        'WeChatAccountQRCodeImageURL',
-        inputs.WeChatAccountQRCodeImageURL,
-      );
-    }
-    if (
-      originInputs['WeChatServerToken'] !== inputs.WeChatServerToken &&
-      inputs.WeChatServerToken !== ''
-    ) {
-      await updateOption('WeChatServerToken', inputs.WeChatServerToken);
-    }
-  };
-
-  const submitGitHubOAuth = async () => {
-    if (originInputs['GitHubClientId'] !== inputs.GitHubClientId) {
-      await updateOption('GitHubClientId', inputs.GitHubClientId);
-    }
-    if (
-      originInputs['GitHubClientSecret'] !== inputs.GitHubClientSecret &&
-      inputs.GitHubClientSecret !== ''
-    ) {
-      await updateOption('GitHubClientSecret', inputs.GitHubClientSecret);
-    }
-  };
-
-  const submitTelegramSettings = async () => {
-    // await updateOption('TelegramOAuthEnabled', inputs.TelegramOAuthEnabled);
-    await updateOption('TelegramBotToken', inputs.TelegramBotToken);
-    await updateOption('TelegramBotName', inputs.TelegramBotName);
-  };
-
-  const submitTurnstile = async () => {
-    if (originInputs['TurnstileSiteKey'] !== inputs.TurnstileSiteKey) {
-      await updateOption('TurnstileSiteKey', inputs.TurnstileSiteKey);
-    }
-    if (
-      originInputs['TurnstileSecretKey'] !== inputs.TurnstileSecretKey &&
-      inputs.TurnstileSecretKey !== ''
-    ) {
-      await updateOption('TurnstileSecretKey', inputs.TurnstileSecretKey);
-    }
-  };
-
-  const submitNewRestrictedDomain = () => {
-    const localDomainList = inputs.EmailDomainWhitelist;
-    if (
-      restrictedDomainInput !== '' &&
-      !localDomainList.includes(restrictedDomainInput)
-    ) {
-      setRestrictedDomainInput('');
-      setInputs({
-        ...inputs,
-        EmailDomainWhitelist: [...localDomainList, restrictedDomainInput],
-      });
-      setEmailDomainWhitelist([
-        ...EmailDomainWhitelist,
-        {
-          key: restrictedDomainInput,
-          text: restrictedDomainInput,
-          value: restrictedDomainInput,
-        },
-      ]);
-    }
-  };
-
-  return (
-    <Grid columns={1}>
-      <Grid.Column>
-        <Form loading={loading} inverted={isDark}>
-          <Header as='h3' inverted={isDark}>
-            通用设置
-          </Header>
-          <Form.Group widths='equal'>
-            <Form.Input
-              label='服务器地址'
-              placeholder='例如:https://yourdomain.com'
-              value={inputs.ServerAddress}
-              name='ServerAddress'
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Form.Button onClick={submitServerAddress}>
-            更新服务器地址
-          </Form.Button>
-          <Header as='h3' inverted={isDark}>
-            代理设置(支持 <a href='https://github.com/Calcium-Ion/new-api-worker' target='_blank' rel='noreferrer'>new-api-worker</a>)
-          </Header>
-          <Form.Group widths='equal'>
-            <Form.Input
-              label='Worker地址,不填写则不启用代理'
-              placeholder='例如:https://workername.yourdomain.workers.dev'
-              value={inputs.WorkerUrl}
-              name='WorkerUrl'
-              onChange={handleInputChange}
-            />
-            <Form.Input
-              label='Worker密钥,根据你部署的 Worker 填写'
-              placeholder='例如:your_secret_key'
-              value={inputs.WorkerValidKey}
-              name='WorkerValidKey'
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Form.Button onClick={submitWorker}>
-            更新Worker设置
-          </Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            支付设置(当前仅支持易支付接口,默认使用上方服务器地址作为回调地址!)
-          </Header>
-          <Form.Group widths='equal'>
-            <Form.Input
-              label='支付地址,不填写则不启用在线支付'
-              placeholder='例如:https://yourdomain.com'
-              value={inputs.PayAddress}
-              name='PayAddress'
-              onChange={handleInputChange}
-            />
-            <Form.Input
-              label='易支付商户ID'
-              placeholder='例如:0001'
-              value={inputs.EpayId}
-              name='EpayId'
-              onChange={handleInputChange}
-            />
-            <Form.Input
-              label='易支付商户密钥'
-              placeholder='敏感信息不会发送到前端显示'
-              value={inputs.EpayKey}
-              name='EpayKey'
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Form.Group widths='equal'>
-            <Form.Input
-              label='回调地址,不填写则使用上方服务器地址作为回调地址'
-              placeholder='例如:https://yourdomain.com'
-              value={inputs.CustomCallbackAddress}
-              name='CustomCallbackAddress'
-              onChange={handleInputChange}
-            />
-            <Form.Input
-              label='充值价格(x元/美金)'
-              placeholder='例如:7,就是7元/美金'
-              value={inputs.Price}
-              name='Price'
-              min={0}
-              onChange={handleInputChange}
-            />
-            <Form.Input
-              label='最低充值美元数量(以美金为单位,如果使用额度请自行换算!)'
-              placeholder='例如:2,就是最低充值2$'
-              value={inputs.MinTopUp}
-              name='MinTopUp'
-              min={1}
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Form.Group widths='equal'>
-            <Form.TextArea
-              label='充值分组倍率'
-              name='TopupGroupRatio'
-              onChange={handleInputChange}
-              style={{ minHeight: 250, fontFamily: 'JetBrains Mono, Consolas' }}
-              autoComplete='new-password'
-              value={inputs.TopupGroupRatio}
-              placeholder='为一个 JSON 文本,键为组名称,值为倍率'
-            />
-          </Form.Group>
-          <Form.Button onClick={submitPayAddress}>更新支付设置</Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置登录注册
-          </Header>
-          <Form.Group inline>
-            <Form.Checkbox
-              checked={inputs.PasswordLoginEnabled === 'true'}
-              label='允许通过密码进行登录'
-              name='PasswordLoginEnabled'
-              onChange={handleInputChange}
-            />
-            {showPasswordWarningModal && (
-              <Modal
-                open={showPasswordWarningModal}
-                onClose={() => setShowPasswordWarningModal(false)}
-                size={'tiny'}
-                style={{ maxWidth: '450px' }}
-              >
-                <Modal.Header>警告</Modal.Header>
-                <Modal.Content>
-                  <p>
-                    取消密码登录将导致所有未绑定其他登录方式的用户(包括管理员)无法通过密码登录,确认取消?
-                  </p>
-                </Modal.Content>
-                <Modal.Actions>
-                  <Button onClick={() => setShowPasswordWarningModal(false)}>
-                    取消
-                  </Button>
-                  <Button
-                    color='yellow'
-                    onClick={async () => {
-                      setShowPasswordWarningModal(false);
-                      await updateOption('PasswordLoginEnabled', 'false');
-                    }}
-                  >
-                    确定
-                  </Button>
-                </Modal.Actions>
-              </Modal>
-            )}
-            <Form.Checkbox
-              checked={inputs.PasswordRegisterEnabled === 'true'}
-              label='允许通过密码进行注册'
-              name='PasswordRegisterEnabled'
-              onChange={handleInputChange}
-            />
-            <Form.Checkbox
-              checked={inputs.EmailVerificationEnabled === 'true'}
-              label='通过密码注册时需要进行邮箱验证'
-              name='EmailVerificationEnabled'
-              onChange={handleInputChange}
-            />
-            <Form.Checkbox
-              checked={inputs.GitHubOAuthEnabled === 'true'}
-              label='允许通过 GitHub 账户登录 & 注册'
-              name='GitHubOAuthEnabled'
-              onChange={handleInputChange}
-            />
-            <Form.Checkbox
-              checked={inputs.WeChatAuthEnabled === 'true'}
-              label='允许通过微信登录 & 注册'
-              name='WeChatAuthEnabled'
-              onChange={handleInputChange}
-            />
-            <Form.Checkbox
-              checked={inputs.TelegramOAuthEnabled === 'true'}
-              label='允许通过 Telegram 进行登录'
-              name='TelegramOAuthEnabled'
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Form.Group inline>
-            <Form.Checkbox
-              checked={inputs.RegisterEnabled === 'true'}
-              label='允许新用户注册(此项为否时,新用户将无法以任何方式进行注册)'
-              name='RegisterEnabled'
-              onChange={handleInputChange}
-            />
-            <Form.Checkbox
-              checked={inputs.TurnstileCheckEnabled === 'true'}
-              label='启用 Turnstile 用户校验'
-              name='TurnstileCheckEnabled'
-              onChange={handleInputChange}
-            />
-          </Form.Group>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置邮箱域名白名单
-            <Header.Subheader>
-              用以防止恶意用户利用临时邮箱批量注册
-            </Header.Subheader>
-          </Header>
-          <Form.Group widths={3}>
-            <Form.Checkbox
-              label='启用邮箱域名白名单'
-              name='EmailDomainRestrictionEnabled'
-              onChange={handleInputChange}
-              checked={inputs.EmailDomainRestrictionEnabled === 'true'}
-            />
-          </Form.Group>
-          <Form.Group widths={3}>
-            <Form.Checkbox
-              label='启用邮箱别名限制(例如:ab.cd@gmail.com)'
-              name='EmailAliasRestrictionEnabled'
-              onChange={handleInputChange}
-              checked={inputs.EmailAliasRestrictionEnabled === 'true'}
-            />
-          </Form.Group>
-          <Form.Group widths={2}>
-            <Form.Dropdown
-              label='允许的邮箱域名'
-              placeholder='允许的邮箱域名'
-              name='EmailDomainWhitelist'
-              required
-              fluid
-              multiple
-              selection
-              onChange={handleInputChange}
-              value={inputs.EmailDomainWhitelist}
-              autoComplete='new-password'
-              options={EmailDomainWhitelist}
-            />
-            <Form.Input
-              label='添加新的允许的邮箱域名'
-              action={
-                <Button
-                  type='button'
-                  onClick={() => {
-                    submitNewRestrictedDomain();
-                  }}
-                >
-                  填入
-                </Button>
-              }
-              onKeyDown={(e) => {
-                if (e.key === 'Enter') {
-                  submitNewRestrictedDomain();
-                }
-              }}
-              autoComplete='new-password'
-              placeholder='输入新的允许的邮箱域名'
-              value={restrictedDomainInput}
-              onChange={(e, { value }) => {
-                setRestrictedDomainInput(value);
-              }}
-            />
-          </Form.Group>
-          <Form.Button onClick={submitEmailDomainWhitelist}>
-            保存邮箱域名白名单设置
-          </Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置 SMTP
-            <Header.Subheader>用以支持系统的邮件发送</Header.Subheader>
-          </Header>
-          <Form.Group widths={3}>
-            <Form.Input
-              label='SMTP 服务器地址'
-              name='SMTPServer'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.SMTPServer}
-              placeholder='例如:smtp.qq.com'
-            />
-            <Form.Input
-              label='SMTP 端口'
-              name='SMTPPort'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.SMTPPort}
-              placeholder='默认: 587'
-            />
-            <Form.Input
-              label='SMTP 账户'
-              name='SMTPAccount'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.SMTPAccount}
-              placeholder='通常是邮箱地址'
-            />
-          </Form.Group>
-          <Form.Group widths={3}>
-            <Form.Input
-              label='SMTP 发送者邮箱'
-              name='SMTPFrom'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.SMTPFrom}
-              placeholder='通常和邮箱地址保持一致'
-            />
-            <Form.Input
-              label='SMTP 访问凭证'
-              name='SMTPToken'
-              onChange={handleInputChange}
-              type='password'
-              autoComplete='new-password'
-              checked={inputs.RegisterEnabled === 'true'}
-              placeholder='敏感信息不会发送到前端显示'
-            />
-          </Form.Group>
-          <Form.Group widths={3}>
-            <Form.Checkbox
-              label='启用SMTP SSL(465端口强制开启)'
-              name='SMTPSSLEnabled'
-              onChange={handleInputChange}
-              checked={inputs.SMTPSSLEnabled === 'true'}
-            />
-          </Form.Group>
-          <Form.Button onClick={submitSMTP}>保存 SMTP 设置</Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置 GitHub OAuth App
-            <Header.Subheader>
-              用以支持通过 GitHub 进行登录注册,
-              <a
-                href='https://github.com/settings/developers'
-                target='_blank'
-                rel='noreferrer'
-              >
-                点击此处
-              </a>
-              管理你的 GitHub OAuth App
-            </Header.Subheader>
-          </Header>
-          <Message>
-            Homepage URL 填 <code>{inputs.ServerAddress}</code>
-            ,Authorization callback URL 填{' '}
-            <code>{`${inputs.ServerAddress}/oauth/github`}</code>
-          </Message>
-          <Form.Group widths={3}>
-            <Form.Input
-              label='GitHub Client ID'
-              name='GitHubClientId'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.GitHubClientId}
-              placeholder='输入你注册的 GitHub OAuth APP 的 ID'
-            />
-            <Form.Input
-              label='GitHub Client Secret'
-              name='GitHubClientSecret'
-              onChange={handleInputChange}
-              type='password'
-              autoComplete='new-password'
-              value={inputs.GitHubClientSecret}
-              placeholder='敏感信息不会发送到前端显示'
-            />
-          </Form.Group>
-          <Form.Button onClick={submitGitHubOAuth}>
-            保存 GitHub OAuth 设置
-          </Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置 WeChat Server
-            <Header.Subheader>
-              用以支持通过微信进行登录注册,
-              <a
-                href='https://github.com/songquanpeng/wechat-server'
-                target='_blank'
-                rel='noreferrer'
-              >
-                点击此处
-              </a>
-              了解 WeChat Server
-            </Header.Subheader>
-          </Header>
-          <Form.Group widths={3}>
-            <Form.Input
-              label='WeChat Server 服务器地址'
-              name='WeChatServerAddress'
-              placeholder='例如:https://yourdomain.com'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.WeChatServerAddress}
-            />
-            <Form.Input
-              label='WeChat Server 访问凭证'
-              name='WeChatServerToken'
-              type='password'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.WeChatServerToken}
-              placeholder='敏感信息不会发送到前端显示'
-            />
-            <Form.Input
-              label='微信公众号二维码图片链接'
-              name='WeChatAccountQRCodeImageURL'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.WeChatAccountQRCodeImageURL}
-              placeholder='输入一个图片链接'
-            />
-          </Form.Group>
-          <Form.Button onClick={submitWeChat}>
-            保存 WeChat Server 设置
-          </Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置 Telegram 登录
-          </Header>
-          <Form.Group inline>
-            <Form.Input
-              label='Telegram Bot Token'
-              name='TelegramBotToken'
-              onChange={handleInputChange}
-              value={inputs.TelegramBotToken}
-              placeholder='输入你的 Telegram Bot Token'
-            />
-            <Form.Input
-              label='Telegram Bot 名称'
-              name='TelegramBotName'
-              onChange={handleInputChange}
-              value={inputs.TelegramBotName}
-              placeholder='输入你的 Telegram Bot 名称'
-            />
-          </Form.Group>
-          <Form.Button onClick={submitTelegramSettings}>
-            保存 Telegram 登录设置
-          </Form.Button>
-          <Divider />
-          <Header as='h3' inverted={isDark}>
-            配置 Turnstile
-            <Header.Subheader>
-              用以支持用户校验,
-              <a
-                href='https://dash.cloudflare.com/'
-                target='_blank'
-                rel='noreferrer'
-              >
-                点击此处
-              </a>
-              管理你的 Turnstile Sites,推荐选择 Invisible Widget Type
-            </Header.Subheader>
-          </Header>
-          <Form.Group widths={3}>
-            <Form.Input
-              label='Turnstile Site Key'
-              name='TurnstileSiteKey'
-              onChange={handleInputChange}
-              autoComplete='new-password'
-              value={inputs.TurnstileSiteKey}
-              placeholder='输入你注册的 Turnstile Site Key'
-            />
-            <Form.Input
-              label='Turnstile Secret Key'
-              name='TurnstileSecretKey'
-              onChange={handleInputChange}
-              type='password'
-              autoComplete='new-password'
-              value={inputs.TurnstileSecretKey}
-              placeholder='敏感信息不会发送到前端显示'
-            />
-          </Form.Group>
-          <Form.Button onClick={submitTurnstile}>
-            保存 Turnstile 设置
-          </Form.Button>
-        </Form>
-      </Grid.Column>
-    </Grid>
-  );
-};
-
-export default SystemSetting;

+ 9 - 3
web/src/constants/channel.constants.js

@@ -80,11 +80,12 @@ export const CHANNEL_OPTIONS = [
     label: 'Google PaLM2'
   },
   {
-    value: 45,
+    value: 47,
     color: 'blue',
-    label: '字节火山方舟、豆包、DeepSeek通用'
+    label: 'Xinference'
   },
   { value: 25, color: 'green', label: 'Moonshot' },
+  { value: 20, color: 'green', label: 'OpenRouter' },
   { value: 19, color: 'blue', label: '360 智脑' },
   { value: 23, color: 'teal', label: '腾讯混元' },
   { value: 31, color: 'green', label: '零一万物' },
@@ -108,5 +109,10 @@ export const CHANNEL_OPTIONS = [
     value: 44,
     color: 'purple',
     label: '嵌入模型:MokaAI M3E'
-  }
+  },
+  {
+    value: 45,
+    color: 'blue',
+    label: '字节火山方舟、豆包、DeepSeek通用'
+  },
 ];

+ 193 - 4
web/src/helpers/render.js

@@ -325,9 +325,8 @@ export function renderModelPrice(
     return (
       <>
         <article>
-          <p>{i18next.t('提示价格:${{price}} = ${{total}} / 1M tokens', {
+          <p>{i18next.t('提示价格:${{price}} / 1M tokens', {
             price: inputRatioPrice,
-            total: inputRatioPrice
           })}</p>
           <p>{i18next.t('补全价格:${{price}} * {{completionRatio}} = ${{total}} / 1M tokens (补全倍率: {{completionRatio}})', {
             price: inputRatioPrice,
@@ -445,9 +444,8 @@ export function renderAudioModelPrice(
     return (
       <>
         <article>
-          <p>{i18next.t('提示价格:${{price}} = ${{total}} / 1M tokens', {
+          <p>{i18next.t('提示价格:${{price}} / 1M tokens', {
             price: inputRatioPrice,
-            total: inputRatioPrice
           })}</p>
           <p>{i18next.t('补全价格:${{price}} * {{completionRatio}} = ${{total}} / 1M tokens (补全倍率: {{completionRatio}})', {
             price: inputRatioPrice,
@@ -654,3 +652,194 @@ export function stringToColor(str) {
   let i = sum % colors.length;
   return colors[i];
 }
+
+export function renderClaudeModelPrice(
+  inputTokens,
+  completionTokens,
+  modelRatio,
+  modelPrice = -1,
+  completionRatio,
+  groupRatio,
+  cacheTokens = 0,
+  cacheRatio = 1.0,
+  cacheCreationTokens = 0,
+  cacheCreationRatio = 1.0,
+) {
+  const ratioLabel = false ? i18next.t('专属倍率') : i18next.t('分组倍率');
+
+  if (modelPrice !== -1) {
+    return i18next.t('模型价格:${{price}} * {{ratioType}}:{{ratio}} = ${{total}}', {
+      price: modelPrice,
+      ratioType: ratioLabel,
+      ratio: groupRatio,
+      total: modelPrice * groupRatio
+    });
+  } else {
+    if (completionRatio === undefined) {
+      completionRatio = 0;
+    }
+
+    const completionRatioValue = completionRatio || 0;
+    const inputRatioPrice = modelRatio * 2.0;
+    const completionRatioPrice = modelRatio * 2.0 * completionRatioValue;
+    let cacheRatioPrice = (modelRatio * 2.0 * cacheRatio).toFixed(2);
+    let cacheCreationRatioPrice = modelRatio * 2.0 * cacheCreationRatio;
+
+    // Calculate effective input tokens (non-cached + cached with ratio applied + cache creation with ratio applied)
+    const nonCachedTokens = inputTokens;
+    const effectiveInputTokens = nonCachedTokens +
+      (cacheTokens * cacheRatio) +
+      (cacheCreationTokens * cacheCreationRatio);
+
+    let price =
+      (effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
+      (completionTokens / 1000000) * completionRatioPrice * groupRatio;
+
+    return (
+      <>
+        <article>
+          <p>{i18next.t('提示价格:${{price}} / 1M tokens', {
+            price: inputRatioPrice,
+          })}</p>
+          <p>{i18next.t('补全价格:${{price}} * {{ratio}} = ${{total}} / 1M tokens', {
+            price: inputRatioPrice,
+            ratio: completionRatio,
+            total: completionRatioPrice
+          })}</p>
+          {cacheTokens > 0 && (
+            <p>{i18next.t('缓存价格:${{price}} * {{ratio}} = ${{total}} / 1M tokens (缓存倍率: {{cacheRatio}})', {
+              price: inputRatioPrice,
+              ratio: cacheRatio,
+              total: cacheRatioPrice,
+              cacheRatio: cacheRatio
+            })}</p>
+          )}
+          {cacheCreationTokens > 0 && (
+            <p>{i18next.t('缓存创建价格:${{price}} * {{ratio}} = ${{total}} / 1M tokens (缓存创建倍率: {{cacheCreationRatio}})', {
+              price: inputRatioPrice,
+              ratio: cacheCreationRatio,
+              total: cacheCreationRatioPrice,
+              cacheCreationRatio: cacheCreationRatio
+            })}</p>
+          )}
+          <p></p>
+          <p>
+            {(cacheTokens > 0 || cacheCreationTokens > 0) ?
+              i18next.t('提示 {{nonCacheInput}} tokens / 1M tokens * ${{price}} + 缓存 {{cacheInput}} tokens / 1M tokens * ${{cachePrice}} + 缓存创建 {{cacheCreationInput}} tokens / 1M tokens * ${{cacheCreationPrice}} + 补全 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}', {
+                nonCacheInput: nonCachedTokens,
+                cacheInput: cacheTokens,
+                cacheRatio: cacheRatio,
+                cacheCreationInput: cacheCreationTokens,
+                cacheCreationRatio: cacheCreationRatio,
+                cachePrice: cacheRatioPrice,
+                cacheCreationPrice: cacheCreationRatioPrice,
+                price: inputRatioPrice,
+                completion: completionTokens,
+                compPrice: completionRatioPrice,
+                ratio: groupRatio,
+                total: price.toFixed(6)
+              }) :
+              i18next.t('提示 {{input}} tokens / 1M tokens * ${{price}} + 补全 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}', {
+                input: inputTokens,
+                price: inputRatioPrice,
+                completion: completionTokens,
+                compPrice: completionRatioPrice,
+                ratio: groupRatio,
+                total: price.toFixed(6)
+              })
+            }
+          </p>
+          <p>{i18next.t('仅供参考,以实际扣费为准')}</p>
+        </article>
+      </>
+    );
+  }
+}
+
/**
 * Build the one-line log description of the ratios applied to a Claude call.
 *
 * @param {number} modelRatio - model multiplier
 * @param {number} completionRatio - completion multiplier
 * @param {number} [modelPrice=-1] - fixed per-call price; -1 means "use ratios"
 * @param {number} groupRatio - group multiplier
 * @param {number} [cacheRatio=1.0] - cache-read multiplier
 * @param {number} [cacheCreationRatio=1.0] - cache-creation multiplier
 * @returns {string} localized summary of price or ratios
 */
export function renderClaudeLogContent(
  modelRatio,
  completionRatio,
  modelPrice = -1,
  groupRatio,
  cacheRatio = 1.0,
  cacheCreationRatio = 1.0,
) {
  // Fixed label; the original `false ? … : …` ternary was dead code.
  const ratioLabel = i18next.t('分组倍率');

  if (modelPrice !== -1) {
    // Fixed-price model: report the price and group ratio only.
    return i18next.t('模型价格 ${{price}},{{ratioType}} {{ratio}}', {
      price: modelPrice,
      ratioType: ratioLabel,
      ratio: groupRatio
    });
  }
  // Ratio-priced model: report every multiplier involved in billing.
  return i18next.t('模型倍率 {{modelRatio}},补全倍率 {{completionRatio}},缓存倍率 {{cacheRatio}},缓存创建倍率 {{cacheCreationRatio}},{{ratioType}} {{ratio}}', {
    modelRatio: modelRatio,
    completionRatio: completionRatio,
    cacheRatio: cacheRatio,
    cacheCreationRatio: cacheCreationRatio,
    ratioType: ratioLabel,
    ratio: groupRatio
  });
}
+
/**
 * Compact price summary for a Claude call (used where the full breakdown
 * from renderClaudeModelPrice would be too verbose).
 *
 * @param {number} modelRatio - model multiplier
 * @param {number} [modelPrice=-1] - fixed per-call price; -1 means "use ratios"
 * @param {number} groupRatio - group multiplier
 * @param {number} [cacheTokens=0] - cached prompt tokens; >0 adds cache info
 * @param {number} [cacheRatio=1.0] - cache-read multiplier
 * @param {number} [cacheCreationTokens=0] - cache-creation tokens; >0 adds cache info
 * @param {number} [cacheCreationRatio=1.0] - kept for signature parity; the
 *   template below has no placeholder for it, so it is not displayed
 * @returns {string} localized one-line summary
 */
export function renderClaudeModelPriceSimple(
  modelRatio,
  modelPrice = -1,
  groupRatio,
  cacheTokens = 0,
  cacheRatio = 1.0,
  cacheCreationTokens = 0,
  cacheCreationRatio = 1.0,
) {
  // Fixed label; the original `false ? … : …` ternary was dead code.
  // NOTE: this function intentionally uses '分组', not '分组倍率'.
  const ratioLabel = i18next.t('分组');

  if (modelPrice !== -1) {
    return i18next.t('价格:${{price}} * {{ratioType}}:{{ratio}}', {
      price: modelPrice,
      ratioType: ratioLabel,
      ratio: groupRatio
    });
  }
  if (cacheTokens !== 0 || cacheCreationTokens !== 0) {
    // Cache was involved: include the cache-read ratio in the summary.
    return i18next.t('模型: {{ratio}} * {{ratioType}}: {{groupRatio}} * 缓存: {{cacheRatio}}', {
      ratio: modelRatio,
      ratioType: ratioLabel,
      groupRatio: groupRatio,
      cacheRatio: cacheRatio
    });
  }
  return i18next.t('模型: {{ratio}} * {{ratioType}}: {{groupRatio}}', {
    ratio: modelRatio,
    ratioType: ratioLabel,
    groupRatio: groupRatio
  });
}
+
/**
 * Build the one-line log description of the ratios applied to a standard
 * (non-Claude) call.
 *
 * @param {number} modelRatio - model multiplier
 * @param {number} completionRatio - completion multiplier
 * @param {number} [modelPrice=-1] - fixed per-call price; -1 means "use ratios"
 * @param {number} groupRatio - group multiplier
 * @returns {string} localized summary of price or ratios
 */
export function renderLogContent(
  modelRatio,
  completionRatio,
  modelPrice = -1,
  groupRatio
) {
  // Fixed label; the original `false ? … : …` ternary was dead code.
  const ratioLabel = i18next.t('分组倍率');

  if (modelPrice !== -1) {
    // Fixed-price model: report the price and group ratio only.
    return i18next.t('模型价格 ${{price}},{{ratioType}} {{ratio}}', {
      price: modelPrice,
      ratioType: ratioLabel,
      ratio: groupRatio
    });
  }
  // Ratio-priced model: report model, completion, and group multipliers.
  return i18next.t('模型倍率 {{modelRatio}},补全倍率 {{completionRatio}},{{ratioType}} {{ratio}}', {
    modelRatio: modelRatio,
    completionRatio: completionRatio,
    ratioType: ratioLabel,
    ratio: groupRatio
  });
}