// adaptor.go
  1. package gemini
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/http"
  8. "one-api/common"
  9. "one-api/dto"
  10. "one-api/relay/channel"
  11. relaycommon "one-api/relay/common"
  12. "one-api/service"
  13. "one-api/setting/model_setting"
  14. "strings"
  15. "github.com/gin-gonic/gin"
  16. )
// Adaptor implements the relay channel adaptor interface for Google Gemini,
// covering chat (generateContent), embeddings (embedContent), and Imagen
// image generation (predict) endpoints.
type Adaptor struct {
}
  19. func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
  20. //TODO implement me
  21. panic("implement me")
  22. return nil, nil
  23. }
  24. func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
  25. //TODO implement me
  26. return nil, errors.New("not implemented")
  27. }
  28. func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
  29. if !strings.HasPrefix(info.UpstreamModelName, "imagen") {
  30. return nil, errors.New("not supported model for image generation")
  31. }
  32. // convert size to aspect ratio
  33. aspectRatio := "1:1" // default aspect ratio
  34. switch request.Size {
  35. case "1024x1024":
  36. aspectRatio = "1:1"
  37. case "1024x1792":
  38. aspectRatio = "9:16"
  39. case "1792x1024":
  40. aspectRatio = "16:9"
  41. }
  42. // build gemini imagen request
  43. geminiRequest := GeminiImageRequest{
  44. Instances: []GeminiImageInstance{
  45. {
  46. Prompt: request.Prompt,
  47. },
  48. },
  49. Parameters: GeminiImageParameters{
  50. SampleCount: request.N,
  51. AspectRatio: aspectRatio,
  52. PersonGeneration: "allow_adult", // default allow adult
  53. },
  54. }
  55. return geminiRequest, nil
  56. }
// Init performs per-request initialization; the Gemini adaptor needs none.
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
}
  59. func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
  60. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
  61. // suffix -thinking and -nothinking
  62. if strings.HasSuffix(info.OriginModelName, "-thinking") {
  63. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
  64. } else if strings.HasSuffix(info.OriginModelName, "-nothinking") {
  65. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
  66. }
  67. }
  68. version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
  69. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  70. return fmt.Sprintf("%s/%s/models/%s:predict", info.BaseUrl, version, info.UpstreamModelName), nil
  71. }
  72. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  73. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  74. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  75. return fmt.Sprintf("%s/%s/models/%s:embedContent", info.BaseUrl, version, info.UpstreamModelName), nil
  76. }
  77. action := "generateContent"
  78. if info.IsStream {
  79. action = "streamGenerateContent?alt=sse"
  80. }
  81. return fmt.Sprintf("%s/%s/models/%s:%s", info.BaseUrl, version, info.UpstreamModelName, action), nil
  82. }
// SetupRequestHeader applies the shared relay headers and then sets the
// Gemini-specific API-key header on the outgoing request.
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
	channel.SetupApiRequestHeader(info, c, req)
	// Gemini authenticates via the x-goog-api-key header rather than a Bearer token.
	req.Set("x-goog-api-key", info.ApiKey)
	return nil
}
  88. func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
  89. if request == nil {
  90. return nil, errors.New("request is nil")
  91. }
  92. geminiRequest, err := CovertGemini2OpenAI(*request, info)
  93. if err != nil {
  94. return nil, err
  95. }
  96. return geminiRequest, nil
  97. }
// ConvertRerankRequest is not supported for the Gemini channel.
// NOTE(review): this returns (nil, nil) rather than an error, so callers get a
// nil payload with no failure signal — confirm this is the intended contract.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	return nil, nil
}
  101. func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
  102. if request.Input == nil {
  103. return nil, errors.New("input is required")
  104. }
  105. inputs := request.ParseInput()
  106. if len(inputs) == 0 {
  107. return nil, errors.New("input is empty")
  108. }
  109. // only process the first input
  110. geminiRequest := GeminiEmbeddingRequest{
  111. Content: GeminiChatContent{
  112. Parts: []GeminiPart{
  113. {
  114. Text: inputs[0],
  115. },
  116. },
  117. },
  118. }
  119. // set specific parameters for different models
  120. // https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
  121. switch info.UpstreamModelName {
  122. case "text-embedding-004":
  123. // except embedding-001 supports setting `OutputDimensionality`
  124. if request.Dimensions > 0 {
  125. geminiRequest.OutputDimensionality = request.Dimensions
  126. }
  127. }
  128. return geminiRequest, nil
  129. }
// DoRequest forwards the prepared request body upstream using the shared
// channel helper; all transport details live in channel.DoApiRequest.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
  133. func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
  134. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  135. return GeminiImageHandler(c, resp, info)
  136. }
  137. // check if the model is an embedding model
  138. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  139. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  140. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  141. return GeminiEmbeddingHandler(c, resp, info)
  142. }
  143. if info.IsStream {
  144. err, usage = GeminiChatStreamHandler(c, resp, info)
  145. } else {
  146. err, usage = GeminiChatHandler(c, resp, info)
  147. }
  148. //if usage.(*dto.Usage).CompletionTokenDetails.ReasoningTokens > 100 {
  149. // // 没有请求-thinking的情况下,产生思考token,则按照思考模型计费
  150. // if !strings.HasSuffix(info.OriginModelName, "-thinking") &&
  151. // !strings.HasSuffix(info.OriginModelName, "-nothinking") {
  152. // thinkingModelName := info.OriginModelName + "-thinking"
  153. // if operation_setting.SelfUseModeEnabled || helper.ContainPriceOrRatio(thinkingModelName) {
  154. // info.OriginModelName = thinkingModelName
  155. // }
  156. // }
  157. //}
  158. return
  159. }
  160. func GeminiImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
  161. responseBody, readErr := io.ReadAll(resp.Body)
  162. if readErr != nil {
  163. return nil, service.OpenAIErrorWrapper(readErr, "read_response_body_failed", http.StatusInternalServerError)
  164. }
  165. _ = resp.Body.Close()
  166. var geminiResponse GeminiImageResponse
  167. if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  168. return nil, service.OpenAIErrorWrapper(jsonErr, "unmarshal_response_body_failed", http.StatusInternalServerError)
  169. }
  170. if len(geminiResponse.Predictions) == 0 {
  171. return nil, service.OpenAIErrorWrapper(errors.New("no images generated"), "no_images", http.StatusBadRequest)
  172. }
  173. // convert to openai format response
  174. openAIResponse := dto.ImageResponse{
  175. Created: common.GetTimestamp(),
  176. Data: make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
  177. }
  178. for _, prediction := range geminiResponse.Predictions {
  179. if prediction.RaiFilteredReason != "" {
  180. continue // skip filtered image
  181. }
  182. openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
  183. B64Json: prediction.BytesBase64Encoded,
  184. })
  185. }
  186. jsonResponse, jsonErr := json.Marshal(openAIResponse)
  187. if jsonErr != nil {
  188. return nil, service.OpenAIErrorWrapper(jsonErr, "marshal_response_failed", http.StatusInternalServerError)
  189. }
  190. c.Writer.Header().Set("Content-Type", "application/json")
  191. c.Writer.WriteHeader(resp.StatusCode)
  192. _, _ = c.Writer.Write(jsonResponse)
  193. // https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
  194. // each image has fixed 258 tokens
  195. const imageTokens = 258
  196. generatedImages := len(openAIResponse.Data)
  197. usage = &dto.Usage{
  198. PromptTokens: imageTokens * generatedImages, // each generated image has fixed 258 tokens
  199. CompletionTokens: 0, // image generation does not calculate completion tokens
  200. TotalTokens: imageTokens * generatedImages,
  201. }
  202. return usage, nil
  203. }
  204. func (a *Adaptor) GetModelList() []string {
  205. return ModelList
  206. }
  207. func (a *Adaptor) GetChannelName() string {
  208. return ChannelName
  209. }