// adaptor.go
  1. package vertex
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/http"
  8. "strings"
  9. "github.com/QuantumNous/new-api/common"
  10. "github.com/QuantumNous/new-api/dto"
  11. "github.com/QuantumNous/new-api/relay/channel"
  12. "github.com/QuantumNous/new-api/relay/channel/claude"
  13. "github.com/QuantumNous/new-api/relay/channel/gemini"
  14. "github.com/QuantumNous/new-api/relay/channel/openai"
  15. relaycommon "github.com/QuantumNous/new-api/relay/common"
  16. "github.com/QuantumNous/new-api/relay/constant"
  17. "github.com/QuantumNous/new-api/setting/model_setting"
  18. "github.com/QuantumNous/new-api/types"
  19. "github.com/gin-gonic/gin"
  20. )
// Request modes select which upstream API family this adaptor speaks.
const (
	RequestModeClaude = 1 // Anthropic Claude models (rawPredict / streamRawPredict)
	RequestModeGemini = 2 // Google Gemini models (generateContent / streamGenerateContent)
	RequestModeLlama  = 3 // Llama models via the OpenAI-compatible endpoint
)
// claudeModelMap translates public Anthropic model names into the
// "@"-versioned identifiers Vertex AI uses for its anthropic publisher.
var claudeModelMap = map[string]string{
	"claude-3-sonnet-20240229":   "claude-3-sonnet@20240229",
	"claude-3-opus-20240229":     "claude-3-opus@20240229",
	"claude-3-haiku-20240307":    "claude-3-haiku@20240307",
	"claude-3-5-sonnet-20240620": "claude-3-5-sonnet@20240620",
	"claude-3-5-sonnet-20241022": "claude-3-5-sonnet-v2@20241022",
	"claude-3-7-sonnet-20250219": "claude-3-7-sonnet@20250219",
	"claude-sonnet-4-20250514":   "claude-sonnet-4@20250514",
	"claude-opus-4-20250514":     "claude-opus-4@20250514",
	"claude-opus-4-1-20250805":   "claude-opus-4-1@20250805",
	"claude-sonnet-4-5-20250929": "claude-sonnet-4-5@20250929",
}
// anthropicVersion is the value Vertex AI requires in the anthropic_version
// field of Claude request bodies.
const anthropicVersion = "vertex-2023-10-16"
// Adaptor relays requests to Google Vertex AI, covering the Claude, Gemini,
// and Llama publisher models.
type Adaptor struct {
	RequestMode        int         // one of RequestModeClaude/Gemini/Llama; set by Init
	AccountCredentials Credentials // service-account credentials decoded from the channel key in getRequestUrl
}
  43. func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
  44. geminiAdaptor := gemini.Adaptor{}
  45. return geminiAdaptor.ConvertGeminiRequest(c, info, request)
  46. }
  47. func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
  48. if v, ok := claudeModelMap[info.UpstreamModelName]; ok {
  49. c.Set("request_model", v)
  50. } else {
  51. c.Set("request_model", request.Model)
  52. }
  53. vertexClaudeReq := copyRequest(request, anthropicVersion)
  54. return vertexClaudeReq, nil
  55. }
// ConvertAudioRequest is not supported for Vertex channels.
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
	//TODO implement me
	return nil, errors.New("not implemented")
}
  60. func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
  61. geminiAdaptor := gemini.Adaptor{}
  62. return geminiAdaptor.ConvertImageRequest(c, info, request)
  63. }
  64. func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
  65. if strings.HasPrefix(info.UpstreamModelName, "claude") {
  66. a.RequestMode = RequestModeClaude
  67. } else if strings.Contains(info.UpstreamModelName, "llama") {
  68. a.RequestMode = RequestModeLlama
  69. } else {
  70. a.RequestMode = RequestModeGemini
  71. }
  72. }
  73. func (a *Adaptor) getRequestUrl(info *relaycommon.RelayInfo, modelName, suffix string) (string, error) {
  74. region := GetModelRegion(info.ApiVersion, info.OriginModelName)
  75. if info.ChannelOtherSettings.VertexKeyType != dto.VertexKeyTypeAPIKey {
  76. adc := &Credentials{}
  77. if err := common.Unmarshal([]byte(info.ApiKey), adc); err != nil {
  78. return "", fmt.Errorf("failed to decode credentials file: %w", err)
  79. }
  80. a.AccountCredentials = *adc
  81. if a.RequestMode == RequestModeGemini {
  82. if region == "global" {
  83. return fmt.Sprintf(
  84. "https://aiplatform.googleapis.com/v1/projects/%s/locations/global/publishers/google/models/%s:%s",
  85. adc.ProjectID,
  86. modelName,
  87. suffix,
  88. ), nil
  89. } else {
  90. return fmt.Sprintf(
  91. "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:%s",
  92. region,
  93. adc.ProjectID,
  94. region,
  95. modelName,
  96. suffix,
  97. ), nil
  98. }
  99. } else if a.RequestMode == RequestModeClaude {
  100. if region == "global" {
  101. return fmt.Sprintf(
  102. "https://aiplatform.googleapis.com/v1/projects/%s/locations/global/publishers/anthropic/models/%s:%s",
  103. adc.ProjectID,
  104. modelName,
  105. suffix,
  106. ), nil
  107. } else {
  108. return fmt.Sprintf(
  109. "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/anthropic/models/%s:%s",
  110. region,
  111. adc.ProjectID,
  112. region,
  113. modelName,
  114. suffix,
  115. ), nil
  116. }
  117. } else if a.RequestMode == RequestModeLlama {
  118. return fmt.Sprintf(
  119. "https://%s-aiplatform.googleapis.com/v1beta1/projects/%s/locations/%s/endpoints/openapi/chat/completions",
  120. region,
  121. adc.ProjectID,
  122. region,
  123. ), nil
  124. }
  125. } else {
  126. var keyPrefix string
  127. if strings.HasSuffix(suffix, "?alt=sse") {
  128. keyPrefix = "&"
  129. } else {
  130. keyPrefix = "?"
  131. }
  132. if region == "global" {
  133. return fmt.Sprintf(
  134. "https://aiplatform.googleapis.com/v1/publishers/google/models/%s:%s%skey=%s",
  135. modelName,
  136. suffix,
  137. keyPrefix,
  138. info.ApiKey,
  139. ), nil
  140. } else {
  141. return fmt.Sprintf(
  142. "https://%s-aiplatform.googleapis.com/v1/publishers/google/models/%s:%s%skey=%s",
  143. region,
  144. modelName,
  145. suffix,
  146. keyPrefix,
  147. info.ApiKey,
  148. ), nil
  149. }
  150. }
  151. return "", errors.New("unsupported request mode")
  152. }
  153. func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
  154. suffix := ""
  155. if a.RequestMode == RequestModeGemini {
  156. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled &&
  157. !model_setting.ShouldPreserveThinkingSuffix(info.OriginModelName) {
  158. // 新增逻辑:处理 -thinking-<budget> 格式
  159. if strings.Contains(info.UpstreamModelName, "-thinking-") {
  160. parts := strings.Split(info.UpstreamModelName, "-thinking-")
  161. info.UpstreamModelName = parts[0]
  162. } else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // 旧的适配
  163. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
  164. } else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
  165. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
  166. }
  167. }
  168. if info.IsStream {
  169. suffix = "streamGenerateContent?alt=sse"
  170. } else {
  171. suffix = "generateContent"
  172. }
  173. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  174. suffix = "predict"
  175. }
  176. return a.getRequestUrl(info, info.UpstreamModelName, suffix)
  177. } else if a.RequestMode == RequestModeClaude {
  178. if info.IsStream {
  179. suffix = "streamRawPredict?alt=sse"
  180. } else {
  181. suffix = "rawPredict"
  182. }
  183. model := info.UpstreamModelName
  184. if v, ok := claudeModelMap[info.UpstreamModelName]; ok {
  185. model = v
  186. }
  187. return a.getRequestUrl(info, model, suffix)
  188. } else if a.RequestMode == RequestModeLlama {
  189. return a.getRequestUrl(info, "", "")
  190. }
  191. return "", errors.New("unsupported request mode")
  192. }
  193. func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
  194. channel.SetupApiRequestHeader(info, c, req)
  195. if info.ChannelOtherSettings.VertexKeyType != dto.VertexKeyTypeAPIKey {
  196. accessToken, err := getAccessToken(a, info)
  197. if err != nil {
  198. return err
  199. }
  200. req.Set("Authorization", "Bearer "+accessToken)
  201. }
  202. if a.AccountCredentials.ProjectID != "" {
  203. req.Set("x-goog-user-project", a.AccountCredentials.ProjectID)
  204. }
  205. return nil
  206. }
  207. func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
  208. if request == nil {
  209. return nil, errors.New("request is nil")
  210. }
  211. if a.RequestMode == RequestModeGemini && strings.HasPrefix(info.UpstreamModelName, "imagen") {
  212. prompt := ""
  213. for _, m := range request.Messages {
  214. if m.Role == "user" {
  215. prompt = m.StringContent()
  216. if prompt != "" {
  217. break
  218. }
  219. }
  220. }
  221. if prompt == "" {
  222. if p, ok := request.Prompt.(string); ok {
  223. prompt = p
  224. }
  225. }
  226. if prompt == "" {
  227. return nil, errors.New("prompt is required for image generation")
  228. }
  229. imgReq := dto.ImageRequest{
  230. Model: request.Model,
  231. Prompt: prompt,
  232. N: 1,
  233. Size: "1024x1024",
  234. }
  235. if request.N > 0 {
  236. imgReq.N = uint(request.N)
  237. }
  238. if request.Size != "" {
  239. imgReq.Size = request.Size
  240. }
  241. if len(request.ExtraBody) > 0 {
  242. var extra map[string]any
  243. if err := json.Unmarshal(request.ExtraBody, &extra); err == nil {
  244. if n, ok := extra["n"].(float64); ok && n > 0 {
  245. imgReq.N = uint(n)
  246. }
  247. if size, ok := extra["size"].(string); ok {
  248. imgReq.Size = size
  249. }
  250. // accept aspectRatio in extra body (top-level or under parameters)
  251. if ar, ok := extra["aspectRatio"].(string); ok && ar != "" {
  252. imgReq.Size = ar
  253. }
  254. if params, ok := extra["parameters"].(map[string]any); ok {
  255. if ar, ok := params["aspectRatio"].(string); ok && ar != "" {
  256. imgReq.Size = ar
  257. }
  258. }
  259. }
  260. }
  261. c.Set("request_model", request.Model)
  262. return a.ConvertImageRequest(c, info, imgReq)
  263. }
  264. if a.RequestMode == RequestModeClaude {
  265. claudeReq, err := claude.RequestOpenAI2ClaudeMessage(c, *request)
  266. if err != nil {
  267. return nil, err
  268. }
  269. vertexClaudeReq := copyRequest(claudeReq, anthropicVersion)
  270. c.Set("request_model", claudeReq.Model)
  271. info.UpstreamModelName = claudeReq.Model
  272. return vertexClaudeReq, nil
  273. } else if a.RequestMode == RequestModeGemini {
  274. geminiRequest, err := gemini.CovertGemini2OpenAI(c, *request, info)
  275. if err != nil {
  276. return nil, err
  277. }
  278. c.Set("request_model", request.Model)
  279. return geminiRequest, nil
  280. } else if a.RequestMode == RequestModeLlama {
  281. return request, nil
  282. }
  283. return nil, errors.New("unsupported request mode")
  284. }
// ConvertRerankRequest is unsupported and returns (nil, nil) — no payload
// and no error. NOTE(review): sibling stubs return "not implemented";
// confirm whether callers rely on the silent nil result before unifying.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	return nil, nil
}
// ConvertEmbeddingRequest is not supported for Vertex channels.
func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
	//TODO implement me
	return nil, errors.New("not implemented")
}
// ConvertOpenAIResponsesRequest is not supported for Vertex channels.
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
	// TODO implement me
	return nil, errors.New("not implemented")
}
// DoRequest sends the converted request upstream via the shared channel helper.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
  299. func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
  300. if info.IsStream {
  301. switch a.RequestMode {
  302. case RequestModeClaude:
  303. return claude.ClaudeStreamHandler(c, resp, info, claude.RequestModeMessage)
  304. case RequestModeGemini:
  305. if info.RelayMode == constant.RelayModeGemini {
  306. return gemini.GeminiTextGenerationStreamHandler(c, info, resp)
  307. } else {
  308. return gemini.GeminiChatStreamHandler(c, info, resp)
  309. }
  310. case RequestModeLlama:
  311. return openai.OaiStreamHandler(c, info, resp)
  312. }
  313. } else {
  314. switch a.RequestMode {
  315. case RequestModeClaude:
  316. return claude.ClaudeHandler(c, resp, info, claude.RequestModeMessage)
  317. case RequestModeGemini:
  318. if info.RelayMode == constant.RelayModeGemini {
  319. return gemini.GeminiTextGenerationHandler(c, info, resp)
  320. } else {
  321. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  322. return gemini.GeminiImageHandler(c, info, resp)
  323. }
  324. return gemini.GeminiChatHandler(c, info, resp)
  325. }
  326. case RequestModeLlama:
  327. return openai.OpenaiHandler(c, info, resp)
  328. }
  329. }
  330. return
  331. }
  332. func (a *Adaptor) GetModelList() []string {
  333. var modelList []string
  334. for i, s := range ModelList {
  335. modelList = append(modelList, s)
  336. ModelList[i] = s
  337. }
  338. for i, s := range claude.ModelList {
  339. modelList = append(modelList, s)
  340. claude.ModelList[i] = s
  341. }
  342. for i, s := range gemini.ModelList {
  343. modelList = append(modelList, s)
  344. gemini.ModelList[i] = s
  345. }
  346. return modelList
  347. }
// GetChannelName returns this channel's name constant (declared elsewhere
// in the package).
func (a *Adaptor) GetChannelName() string {
	return ChannelName
}