// adaptor.go
  1. package gemini
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/http"
  8. "one-api/common"
  9. "one-api/dto"
  10. "one-api/relay/channel"
  11. relaycommon "one-api/relay/common"
  12. "one-api/relay/constant"
  13. "one-api/service"
  14. "one-api/setting/model_setting"
  15. "strings"
  16. "github.com/gin-gonic/gin"
  17. )
// Adaptor implements the relay channel adaptor interface for Google
// Gemini, translating OpenAI-style relay requests into Gemini API calls
// (chat, embeddings, and Imagen image generation).
type Adaptor struct {
}
  20. func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
  21. //TODO implement me
  22. panic("implement me")
  23. return nil, nil
  24. }
  25. func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
  26. //TODO implement me
  27. return nil, errors.New("not implemented")
  28. }
  29. func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
  30. if !strings.HasPrefix(info.UpstreamModelName, "imagen") {
  31. return nil, errors.New("not supported model for image generation")
  32. }
  33. // convert size to aspect ratio
  34. aspectRatio := "1:1" // default aspect ratio
  35. switch request.Size {
  36. case "1024x1024":
  37. aspectRatio = "1:1"
  38. case "1024x1792":
  39. aspectRatio = "9:16"
  40. case "1792x1024":
  41. aspectRatio = "16:9"
  42. }
  43. // build gemini imagen request
  44. geminiRequest := GeminiImageRequest{
  45. Instances: []GeminiImageInstance{
  46. {
  47. Prompt: request.Prompt,
  48. },
  49. },
  50. Parameters: GeminiImageParameters{
  51. SampleCount: request.N,
  52. AspectRatio: aspectRatio,
  53. PersonGeneration: "allow_adult", // default allow adult
  54. },
  55. }
  56. return geminiRequest, nil
  57. }
// Init is a no-op: the Gemini adaptor requires no per-request setup.
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
}
  60. func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
  61. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
  62. // suffix -thinking and -nothinking
  63. if strings.HasSuffix(info.OriginModelName, "-thinking") {
  64. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
  65. } else if strings.HasSuffix(info.OriginModelName, "-nothinking") {
  66. info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
  67. }
  68. }
  69. version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
  70. if strings.HasPrefix(info.UpstreamModelName, "imagen") {
  71. return fmt.Sprintf("%s/%s/models/%s:predict", info.BaseUrl, version, info.UpstreamModelName), nil
  72. }
  73. if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
  74. strings.HasPrefix(info.UpstreamModelName, "embedding") ||
  75. strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
  76. return fmt.Sprintf("%s/%s/models/%s:embedContent", info.BaseUrl, version, info.UpstreamModelName), nil
  77. }
  78. action := "generateContent"
  79. if info.IsStream {
  80. action = "streamGenerateContent?alt=sse"
  81. }
  82. return fmt.Sprintf("%s/%s/models/%s:%s", info.BaseUrl, version, info.UpstreamModelName, action), nil
  83. }
  84. func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
  85. channel.SetupApiRequestHeader(info, c, req)
  86. req.Set("x-goog-api-key", info.ApiKey)
  87. return nil
  88. }
  89. func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
  90. if request == nil {
  91. return nil, errors.New("request is nil")
  92. }
  93. geminiRequest, err := CovertGemini2OpenAI(*request, info)
  94. if err != nil {
  95. return nil, err
  96. }
  97. return geminiRequest, nil
  98. }
  99. func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
  100. return nil, nil
  101. }
  102. func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
  103. if request.Input == nil {
  104. return nil, errors.New("input is required")
  105. }
  106. inputs := request.ParseInput()
  107. if len(inputs) == 0 {
  108. return nil, errors.New("input is empty")
  109. }
  110. // only process the first input
  111. geminiRequest := GeminiEmbeddingRequest{
  112. Content: GeminiChatContent{
  113. Parts: []GeminiPart{
  114. {
  115. Text: inputs[0],
  116. },
  117. },
  118. },
  119. }
  120. // set specific parameters for different models
  121. // https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
  122. switch info.UpstreamModelName {
  123. case "text-embedding-004":
  124. // except embedding-001 supports setting `OutputDimensionality`
  125. if request.Dimensions > 0 {
  126. geminiRequest.OutputDimensionality = request.Dimensions
  127. }
  128. }
  129. return geminiRequest, nil
  130. }
  131. func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
  132. // TODO implement me
  133. return nil, errors.New("not implemented")
  134. }
// DoRequest delegates to the shared channel helper, which performs the
// upstream HTTP call using this adaptor's URL and header setup.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
// DoResponse routes the upstream response to the matching handler:
// native Gemini relay mode first, then Imagen image models, then
// embedding models, and finally the OpenAI-compatible chat handlers
// (streaming or not). The dispatch order matters: relay mode takes
// precedence over model-name prefix checks.
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
	// Native Gemini protocol passthrough.
	if info.RelayMode == constant.RelayModeGemini {
		if info.IsStream {
			return GeminiTextGenerationStreamHandler(c, resp, info)
		} else {
			return GeminiTextGenerationHandler(c, resp, info)
		}
	}
	// Imagen image generation.
	if strings.HasPrefix(info.UpstreamModelName, "imagen") {
		return GeminiImageHandler(c, resp, info)
	}
	// check if the model is an embedding model
	if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
		return GeminiEmbeddingHandler(c, resp, info)
	}
	// OpenAI-compatible chat path; results go to the named returns.
	if info.IsStream {
		err, usage = GeminiChatStreamHandler(c, resp, info)
	} else {
		err, usage = GeminiChatHandler(c, resp, info)
	}
	// NOTE(review): disabled experiment — when reasoning tokens appear
	// without an explicit -thinking request, bill as the thinking model.
	//if usage.(*dto.Usage).CompletionTokenDetails.ReasoningTokens > 100 {
	//	// if thinking tokens were produced without requesting -thinking,
	//	// bill according to the thinking model
	//	if !strings.HasSuffix(info.OriginModelName, "-thinking") &&
	//		!strings.HasSuffix(info.OriginModelName, "-nothinking") {
	//		thinkingModelName := info.OriginModelName + "-thinking"
	//		if operation_setting.SelfUseModeEnabled || helper.ContainPriceOrRatio(thinkingModelName) {
	//			info.OriginModelName = thinkingModelName
	//		}
	//	}
	//}
	return
}
  172. func GeminiImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
  173. responseBody, readErr := io.ReadAll(resp.Body)
  174. if readErr != nil {
  175. return nil, service.OpenAIErrorWrapper(readErr, "read_response_body_failed", http.StatusInternalServerError)
  176. }
  177. _ = resp.Body.Close()
  178. var geminiResponse GeminiImageResponse
  179. if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  180. return nil, service.OpenAIErrorWrapper(jsonErr, "unmarshal_response_body_failed", http.StatusInternalServerError)
  181. }
  182. if len(geminiResponse.Predictions) == 0 {
  183. return nil, service.OpenAIErrorWrapper(errors.New("no images generated"), "no_images", http.StatusBadRequest)
  184. }
  185. // convert to openai format response
  186. openAIResponse := dto.ImageResponse{
  187. Created: common.GetTimestamp(),
  188. Data: make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
  189. }
  190. for _, prediction := range geminiResponse.Predictions {
  191. if prediction.RaiFilteredReason != "" {
  192. continue // skip filtered image
  193. }
  194. openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
  195. B64Json: prediction.BytesBase64Encoded,
  196. })
  197. }
  198. jsonResponse, jsonErr := json.Marshal(openAIResponse)
  199. if jsonErr != nil {
  200. return nil, service.OpenAIErrorWrapper(jsonErr, "marshal_response_failed", http.StatusInternalServerError)
  201. }
  202. c.Writer.Header().Set("Content-Type", "application/json")
  203. c.Writer.WriteHeader(resp.StatusCode)
  204. _, _ = c.Writer.Write(jsonResponse)
  205. // https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
  206. // each image has fixed 258 tokens
  207. const imageTokens = 258
  208. generatedImages := len(openAIResponse.Data)
  209. usage = &dto.Usage{
  210. PromptTokens: imageTokens * generatedImages, // each generated image has fixed 258 tokens
  211. CompletionTokens: 0, // image generation does not calculate completion tokens
  212. TotalTokens: imageTokens * generatedImages,
  213. }
  214. return usage, nil
  215. }
// GetModelList returns the package-level list of models this channel
// supports.
func (a *Adaptor) GetModelList() []string {
	return ModelList
}
// GetChannelName returns the package-level channel display name.
func (a *Adaptor) GetChannelName() string {
	return ChannelName
}