// adaptor.go
  1. package gemini
  2. import (
  3. "errors"
  4. "fmt"
  5. "io"
  6. "net/http"
  7. "strings"
  8. "github.com/QuantumNous/new-api/dto"
  9. "github.com/QuantumNous/new-api/relay/channel"
  10. "github.com/QuantumNous/new-api/relay/channel/openai"
  11. relaycommon "github.com/QuantumNous/new-api/relay/common"
  12. "github.com/QuantumNous/new-api/relay/constant"
  13. "github.com/QuantumNous/new-api/setting/model_setting"
  14. "github.com/QuantumNous/new-api/setting/reasoning"
  15. "github.com/QuantumNous/new-api/types"
  16. "github.com/gin-gonic/gin"
  17. "github.com/samber/lo"
  18. )
// Adaptor implements the relay channel adaptor interface for the Gemini API.
type Adaptor struct {
}
  21. func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
  22. if len(request.Contents) > 0 {
  23. for i, content := range request.Contents {
  24. if i == 0 {
  25. if request.Contents[0].Role == "" {
  26. request.Contents[0].Role = "user"
  27. }
  28. }
  29. for _, part := range content.Parts {
  30. if part.FileData != nil {
  31. if part.FileData.MimeType == "" && strings.Contains(part.FileData.FileUri, "www.youtube.com") {
  32. part.FileData.MimeType = "video/webm"
  33. }
  34. }
  35. }
  36. }
  37. }
  38. return request, nil
  39. }
  40. func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, req *dto.ClaudeRequest) (any, error) {
  41. adaptor := openai.Adaptor{}
  42. oaiReq, err := adaptor.ConvertClaudeRequest(c, info, req)
  43. if err != nil {
  44. return nil, err
  45. }
  46. return a.ConvertOpenAIRequest(c, info, oaiReq.(*dto.GeneralOpenAIRequest))
  47. }
// ConvertAudioRequest is not supported by the Gemini adaptor yet and always
// returns an error.
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
	//TODO implement me
	return nil, errors.New("not implemented")
}
  52. func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
  53. if !strings.HasPrefix(info.UpstreamModelName, "imagen") {
  54. return nil, errors.New("not supported model for image generation")
  55. }
  56. // convert size to aspect ratio but allow user to specify aspect ratio
  57. aspectRatio := "1:1" // default aspect ratio
  58. size := strings.TrimSpace(request.Size)
  59. if size != "" {
  60. if strings.Contains(size, ":") {
  61. aspectRatio = size
  62. } else {
  63. switch size {
  64. case "256x256", "512x512", "1024x1024":
  65. aspectRatio = "1:1"
  66. case "1536x1024":
  67. aspectRatio = "3:2"
  68. case "1024x1536":
  69. aspectRatio = "2:3"
  70. case "1024x1792":
  71. aspectRatio = "9:16"
  72. case "1792x1024":
  73. aspectRatio = "16:9"
  74. }
  75. }
  76. }
  77. // build gemini imagen request
  78. geminiRequest := dto.GeminiImageRequest{
  79. Instances: []dto.GeminiImageInstance{
  80. {
  81. Prompt: request.Prompt,
  82. },
  83. },
  84. Parameters: dto.GeminiImageParameters{
  85. SampleCount: int(lo.FromPtrOr(request.N, uint(1))),
  86. AspectRatio: aspectRatio,
  87. PersonGeneration: "allow_adult", // default allow adult
  88. },
  89. }
  90. // Set imageSize when quality parameter is specified
  91. // Map quality parameter to imageSize (only supported by Standard and Ultra models)
  92. // quality values: auto, high, medium, low (for gpt-image-1), hd, standard (for dall-e-3)
  93. // imageSize values: 1K (default), 2K
  94. // https://ai.google.dev/gemini-api/docs/imagen
  95. // https://platform.openai.com/docs/api-reference/images/create
  96. if request.Quality != "" {
  97. imageSize := "1K" // default
  98. switch request.Quality {
  99. case "hd", "high":
  100. imageSize = "2K"
  101. case "2K":
  102. imageSize = "2K"
  103. case "standard", "medium", "low", "auto", "1K":
  104. imageSize = "1K"
  105. default:
  106. // unknown quality value, default to 1K
  107. imageSize = "1K"
  108. }
  109. geminiRequest.Parameters.ImageSize = imageSize
  110. }
  111. return geminiRequest, nil
  112. }
// Init performs per-request initialization; the Gemini adaptor needs none.
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
}
  115. func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
  116. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled &&
  117. !model_setting.ShouldPreserveThinkingSuffix(info.OriginModelName) {
  118. // 新增逻辑:处理 -thinking-<budget> 格式
  119. if strings.Contains(info.UpstreamModelName, "-thinking-") {
  120. parts := strings.Split(info.UpstreamModelName, "-thinking-")
  121. info.UpstreamModelName = parts[0]
  122. } else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // 旧的适配
  123. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
  124. } else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
  125. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
  126. } else if baseModel, level, ok := reasoning.TrimEffortSuffix(info.UpstreamModelName); ok && level != "" {
  127. info.UpstreamModelName = baseModel
  128. }
  129. }
  130. version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
  131. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  132. return fmt.Sprintf("%s/%s/models/%s:predict", info.ChannelBaseUrl, version, info.UpstreamModelName), nil
  133. }
  134. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  135. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  136. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  137. action := "embedContent"
  138. if info.IsGeminiBatchEmbedding {
  139. action = "batchEmbedContents"
  140. }
  141. return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
  142. }
  143. action := "generateContent"
  144. if info.IsStream {
  145. action = "streamGenerateContent?alt=sse"
  146. if info.RelayMode == constant.RelayModeGemini {
  147. info.DisablePing = true
  148. }
  149. }
  150. return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
  151. }
// SetupRequestHeader applies the common relay headers and then attaches the
// Gemini API key via the x-goog-api-key header.
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
	channel.SetupApiRequestHeader(info, c, req)
	req.Set("x-goog-api-key", info.ApiKey)
	return nil
}
  157. func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
  158. if request == nil {
  159. return nil, errors.New("request is nil")
  160. }
  161. geminiRequest, err := CovertOpenAI2Gemini(c, *request, info)
  162. if err != nil {
  163. return nil, err
  164. }
  165. return geminiRequest, nil
  166. }
// ConvertRerankRequest returns (nil, nil).
// NOTE(review): rerank appears unsupported by this adaptor; callers must
// tolerate a nil payload — confirm against the relay dispatch code.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	return nil, nil
}
  170. func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
  171. if request.Input == nil {
  172. return nil, errors.New("input is required")
  173. }
  174. inputs := request.ParseInput()
  175. if len(inputs) == 0 {
  176. return nil, errors.New("input is empty")
  177. }
  178. // We always build a batch-style payload with `requests`, so ensure we call the
  179. // batch endpoint upstream to avoid payload/endpoint mismatches.
  180. info.IsGeminiBatchEmbedding = true
  181. // process all inputs
  182. geminiRequests := make([]map[string]interface{}, 0, len(inputs))
  183. for _, input := range inputs {
  184. geminiRequest := map[string]interface{}{
  185. "model": fmt.Sprintf("models/%s", info.UpstreamModelName),
  186. "content": dto.GeminiChatContent{
  187. Parts: []dto.GeminiPart{
  188. {
  189. Text: input,
  190. },
  191. },
  192. },
  193. }
  194. // set specific parameters for different models
  195. // https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
  196. switch info.UpstreamModelName {
  197. case "text-embedding-004", "gemini-embedding-exp-03-07", "gemini-embedding-001":
  198. // Only newer models introduced after 2024 support OutputDimensionality
  199. dimensions := lo.FromPtrOr(request.Dimensions, 0)
  200. if dimensions > 0 {
  201. geminiRequest["outputDimensionality"] = dimensions
  202. }
  203. }
  204. geminiRequests = append(geminiRequests, geminiRequest)
  205. }
  206. return map[string]interface{}{
  207. "requests": geminiRequests,
  208. }, nil
  209. }
// ConvertOpenAIResponsesRequest is not supported by the Gemini adaptor yet
// and always returns an error.
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
	// TODO implement me
	return nil, errors.New("not implemented")
}
// DoRequest sends the converted request upstream via the shared channel
// request helper.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
  217. func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
  218. if info.RelayMode == constant.RelayModeGemini {
  219. if strings.Contains(info.RequestURLPath, ":embedContent") ||
  220. strings.Contains(info.RequestURLPath, ":batchEmbedContents") {
  221. return NativeGeminiEmbeddingHandler(c, resp, info)
  222. }
  223. if info.IsStream {
  224. return GeminiTextGenerationStreamHandler(c, info, resp)
  225. } else {
  226. return GeminiTextGenerationHandler(c, info, resp)
  227. }
  228. }
  229. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  230. return GeminiImageHandler(c, info, resp)
  231. }
  232. // check if the model is an embedding model
  233. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  234. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  235. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  236. return GeminiEmbeddingHandler(c, info, resp)
  237. }
  238. if info.IsStream {
  239. return GeminiChatStreamHandler(c, info, resp)
  240. } else {
  241. return GeminiChatHandler(c, info, resp)
  242. }
  243. }
// GetModelList returns the models supported by the Gemini channel.
func (a *Adaptor) GetModelList() []string {
	return ModelList
}
// GetChannelName returns the display name of the Gemini channel.
func (a *Adaptor) GetChannelName() string {
	return ChannelName
}