// adaptor.go
  1. package gemini
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/http"
  8. "strings"
  9. "github.com/QuantumNous/new-api/dto"
  10. "github.com/QuantumNous/new-api/relay/channel"
  11. "github.com/QuantumNous/new-api/relay/channel/openai"
  12. relaycommon "github.com/QuantumNous/new-api/relay/common"
  13. "github.com/QuantumNous/new-api/relay/constant"
  14. "github.com/QuantumNous/new-api/setting/model_setting"
  15. "github.com/QuantumNous/new-api/types"
  16. "github.com/gin-gonic/gin"
  17. )
// Adaptor implements the relay channel adaptor interface for the Gemini API.
// It is stateless; all per-request state lives in relaycommon.RelayInfo.
type Adaptor struct {
}
  20. func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
  21. if len(request.Contents) > 0 {
  22. for i, content := range request.Contents {
  23. if i == 0 {
  24. if request.Contents[0].Role == "" {
  25. request.Contents[0].Role = "user"
  26. }
  27. }
  28. for _, part := range content.Parts {
  29. if part.FileData != nil {
  30. if part.FileData.MimeType == "" && strings.Contains(part.FileData.FileUri, "www.youtube.com") {
  31. part.FileData.MimeType = "video/webm"
  32. }
  33. }
  34. }
  35. }
  36. }
  37. return request, nil
  38. }
  39. func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, req *dto.ClaudeRequest) (any, error) {
  40. adaptor := openai.Adaptor{}
  41. oaiReq, err := adaptor.ConvertClaudeRequest(c, info, req)
  42. if err != nil {
  43. return nil, err
  44. }
  45. return a.ConvertOpenAIRequest(c, info, oaiReq.(*dto.GeneralOpenAIRequest))
  46. }
// ConvertAudioRequest is not supported for the Gemini channel yet.
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
	//TODO implement me
	return nil, errors.New("not implemented")
}
// ImageConfig carries the Gemini image-generation settings derived from the
// OpenAI-style size/quality request parameters. Empty fields are omitted from
// the serialized generation config.
type ImageConfig struct {
	AspectRatio string `json:"aspectRatio,omitempty"` // e.g. "1:1", "16:9"
	ImageSize   string `json:"imageSize,omitempty"`   // e.g. "1K", "2K", "4K"
}
// SizeMapping maps a pixel-size string (e.g. "1024x1024") to the Gemini
// aspect ratio and, where needed, an explicit image size tier.
type SizeMapping struct {
	AspectRatio string // Gemini aspect ratio, e.g. "3:2"
	ImageSize   string // "" means keep the model's default (1K)
}
// QualityMapping groups the Gemini image-size tiers that OpenAI-style quality
// values ("standard", "hd", "high", "4k", "auto") are translated to.
type QualityMapping struct {
	Standard string
	HD       string
	High     string
	FourK    string
	Auto     string
}
  66. func getImageSizeMapping() QualityMapping {
  67. return QualityMapping{
  68. Standard: "1K",
  69. HD: "2K",
  70. High: "2K",
  71. FourK: "4K",
  72. Auto: "1K",
  73. }
  74. }
// getSizeMappings returns the lookup table from OpenAI-style pixel sizes
// (e.g. "1024x1024") to Gemini aspect ratios. An empty ImageSize keeps the
// model's default resolution tier; only the 2048/4096 squares force 2K/4K.
func getSizeMappings() map[string]SizeMapping {
	return map[string]SizeMapping{
		// Gemini 2.5 Flash Image - default 1K resolutions
		"1024x1024": {AspectRatio: "1:1", ImageSize: ""},
		"832x1248":  {AspectRatio: "2:3", ImageSize: ""},
		"1248x832":  {AspectRatio: "3:2", ImageSize: ""},
		"864x1184":  {AspectRatio: "3:4", ImageSize: ""},
		"1184x864":  {AspectRatio: "4:3", ImageSize: ""},
		"896x1152":  {AspectRatio: "4:5", ImageSize: ""},
		"1152x896":  {AspectRatio: "5:4", ImageSize: ""},
		"768x1344":  {AspectRatio: "9:16", ImageSize: ""},
		"1344x768":  {AspectRatio: "16:9", ImageSize: ""},
		"1536x672":  {AspectRatio: "21:9", ImageSize: ""},
		// Gemini 3 Pro Image Preview resolutions
		"1536x1024": {AspectRatio: "3:2", ImageSize: ""},
		"1024x1536": {AspectRatio: "2:3", ImageSize: ""},
		"1024x1792": {AspectRatio: "9:16", ImageSize: ""},
		"1792x1024": {AspectRatio: "16:9", ImageSize: ""},
		"2048x2048": {AspectRatio: "1:1", ImageSize: "2K"},
		"4096x4096": {AspectRatio: "1:1", ImageSize: "4K"},
	}
}
  97. func processSizeParameters(size, quality string) ImageConfig {
  98. config := ImageConfig{} // 默认为空值
  99. if size != "" {
  100. if strings.Contains(size, ":") {
  101. config.AspectRatio = size // 直接设置,不与默认值比较
  102. } else {
  103. if mapping, exists := getSizeMappings()[size]; exists {
  104. if mapping.AspectRatio != "" {
  105. config.AspectRatio = mapping.AspectRatio
  106. }
  107. if mapping.ImageSize != "" {
  108. config.ImageSize = mapping.ImageSize
  109. }
  110. }
  111. }
  112. }
  113. if quality != "" {
  114. qualityMapping := getImageSizeMapping()
  115. switch strings.ToLower(strings.TrimSpace(quality)) {
  116. case "hd", "high":
  117. config.ImageSize = qualityMapping.HD
  118. case "4k":
  119. config.ImageSize = qualityMapping.FourK
  120. case "standard", "medium", "low", "auto", "1k":
  121. config.ImageSize = qualityMapping.Standard
  122. }
  123. }
  124. return config
  125. }
  126. func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
  127. if strings.HasPrefix(info.UpstreamModelName, "gemini-3-pro-image") {
  128. chatRequest := dto.GeneralOpenAIRequest{
  129. Model: request.Model,
  130. Messages: []dto.Message{
  131. {Role: "user", Content: request.Prompt},
  132. },
  133. N: int(request.N),
  134. }
  135. config := processSizeParameters(strings.TrimSpace(request.Size), request.Quality)
  136. googleGenerationConfig := map[string]interface{}{
  137. "responseModalities": []string{"TEXT", "IMAGE"},
  138. "imageConfig": config,
  139. }
  140. extraBody := map[string]interface{}{
  141. "google": map[string]interface{}{
  142. "generationConfig": googleGenerationConfig,
  143. },
  144. }
  145. chatRequest.ExtraBody, _ = json.Marshal(extraBody)
  146. return a.ConvertOpenAIRequest(c, info, &chatRequest)
  147. }
  148. // convert size to aspect ratio but allow user to specify aspect ratio
  149. aspectRatio := "1:1" // default aspect ratio
  150. size := strings.TrimSpace(request.Size)
  151. if size != "" {
  152. if strings.Contains(size, ":") {
  153. aspectRatio = size
  154. } else {
  155. if mapping, exists := getSizeMappings()[size]; exists && mapping.AspectRatio != "" {
  156. aspectRatio = mapping.AspectRatio
  157. }
  158. }
  159. }
  160. // build gemini imagen request
  161. geminiRequest := dto.GeminiImageRequest{
  162. Instances: []dto.GeminiImageInstance{
  163. {
  164. Prompt: request.Prompt,
  165. },
  166. },
  167. Parameters: dto.GeminiImageParameters{
  168. SampleCount: int(request.N),
  169. AspectRatio: aspectRatio,
  170. PersonGeneration: "allow_adult", // default allow adult
  171. },
  172. }
  173. // Set imageSize when quality parameter is specified
  174. // Map quality parameter to imageSize (only supported by Standard and Ultra models)
  175. // quality values: auto, high, medium, low (for gpt-image-1), hd, standard (for dall-e-3)
  176. // imageSize values: 1K (default), 2K
  177. // https://ai.google.dev/gemini-api/docs/imagen
  178. // https://platform.openai.com/docs/api-reference/images/create
  179. if request.Quality != "" {
  180. imageSize := "1K" // default
  181. switch request.Quality {
  182. case "hd", "high":
  183. imageSize = "2K"
  184. case "2K":
  185. imageSize = "2K"
  186. case "standard", "medium", "low", "auto", "1K":
  187. imageSize = "1K"
  188. default:
  189. // unknown quality value, default to 1K
  190. imageSize = "1K"
  191. }
  192. geminiRequest.Parameters.ImageSize = imageSize
  193. }
  194. return geminiRequest, nil
  195. }
// Init performs per-request adaptor initialization; the Gemini adaptor is
// stateless and needs none.
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
}
  198. func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
  199. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled &&
  200. !model_setting.ShouldPreserveThinkingSuffix(info.OriginModelName) {
  201. // 新增逻辑:处理 -thinking-<budget> 格式
  202. if strings.Contains(info.UpstreamModelName, "-thinking-") {
  203. parts := strings.Split(info.UpstreamModelName, "-thinking-")
  204. info.UpstreamModelName = parts[0]
  205. } else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // 旧的适配
  206. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
  207. } else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
  208. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
  209. }
  210. }
  211. version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
  212. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  213. return fmt.Sprintf("%s/%s/models/%s:predict", info.ChannelBaseUrl, version, info.UpstreamModelName), nil
  214. }
  215. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  216. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  217. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  218. action := "embedContent"
  219. if info.IsGeminiBatchEmbedding {
  220. action = "batchEmbedContents"
  221. }
  222. return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
  223. }
  224. action := "generateContent"
  225. if info.IsStream {
  226. action = "streamGenerateContent?alt=sse"
  227. if info.RelayMode == constant.RelayModeGemini {
  228. info.DisablePing = true
  229. }
  230. }
  231. return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
  232. }
// SetupRequestHeader applies the common relay headers and then sets the
// Gemini API key header (x-goog-api-key) on the upstream request.
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
	channel.SetupApiRequestHeader(info, c, req)
	req.Set("x-goog-api-key", info.ApiKey)
	return nil
}
  238. func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
  239. if request == nil {
  240. return nil, errors.New("request is nil")
  241. }
  242. geminiRequest, err := CovertOpenAI2Gemini(c, *request, info)
  243. if err != nil {
  244. return nil, err
  245. }
  246. return geminiRequest, nil
  247. }
  248. func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
  249. return nil, nil
  250. }
  251. func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
  252. if request.Input == nil {
  253. return nil, errors.New("input is required")
  254. }
  255. inputs := request.ParseInput()
  256. if len(inputs) == 0 {
  257. return nil, errors.New("input is empty")
  258. }
  259. // We always build a batch-style payload with `requests`, so ensure we call the
  260. // batch endpoint upstream to avoid payload/endpoint mismatches.
  261. info.IsGeminiBatchEmbedding = true
  262. // process all inputs
  263. geminiRequests := make([]map[string]interface{}, 0, len(inputs))
  264. for _, input := range inputs {
  265. geminiRequest := map[string]interface{}{
  266. "model": fmt.Sprintf("models/%s", info.UpstreamModelName),
  267. "content": dto.GeminiChatContent{
  268. Parts: []dto.GeminiPart{
  269. {
  270. Text: input,
  271. },
  272. },
  273. },
  274. }
  275. // set specific parameters for different models
  276. // https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
  277. switch info.UpstreamModelName {
  278. case "text-embedding-004", "gemini-embedding-exp-03-07", "gemini-embedding-001":
  279. // Only newer models introduced after 2024 support OutputDimensionality
  280. if request.Dimensions > 0 {
  281. geminiRequest["outputDimensionality"] = request.Dimensions
  282. }
  283. }
  284. geminiRequests = append(geminiRequests, geminiRequest)
  285. }
  286. return map[string]interface{}{
  287. "requests": geminiRequests,
  288. }, nil
  289. }
// ConvertOpenAIResponsesRequest is not supported for the Gemini channel yet.
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
	// TODO implement me
	return nil, errors.New("not implemented")
}
// DoRequest sends the converted request body upstream using the shared
// channel request helper.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
  297. func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
  298. if info.RelayMode == constant.RelayModeGemini {
  299. if strings.Contains(info.RequestURLPath, ":embedContent") ||
  300. strings.Contains(info.RequestURLPath, ":batchEmbedContents") {
  301. return NativeGeminiEmbeddingHandler(c, resp, info)
  302. }
  303. if info.IsStream {
  304. return GeminiTextGenerationStreamHandler(c, info, resp)
  305. } else {
  306. return GeminiTextGenerationHandler(c, info, resp)
  307. }
  308. }
  309. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  310. return GeminiImageHandler(c, info, resp)
  311. }
  312. if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
  313. return ChatImageHandler(c, info, resp)
  314. }
  315. // check if the model is an embedding model
  316. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  317. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  318. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  319. return GeminiEmbeddingHandler(c, info, resp)
  320. }
  321. if info.IsStream {
  322. return GeminiChatStreamHandler(c, info, resp)
  323. } else {
  324. return GeminiChatHandler(c, info, resp)
  325. }
  326. }
// GetModelList returns the package-level list of models this channel serves.
func (a *Adaptor) GetModelList() []string {
	return ModelList
}
// GetChannelName returns the package-level display name of this channel.
func (a *Adaptor) GetChannelName() string {
	return ChannelName
}