// relay-gemini-native.go — handlers for relaying Gemini native API responses.
package gemini

import (
	"io"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"

	"github.com/QuantumNous/new-api/common"
	"github.com/QuantumNous/new-api/dto"
	"github.com/QuantumNous/new-api/logger"
	relaycommon "github.com/QuantumNous/new-api/relay/common"
	"github.com/QuantumNous/new-api/relay/helper"
	"github.com/QuantumNous/new-api/service"
	"github.com/QuantumNous/new-api/types"
)
  16. func GeminiTextGenerationHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  17. defer service.CloseResponseBodyGracefully(resp)
  18. // 读取响应体
  19. responseBody, err := io.ReadAll(resp.Body)
  20. if err != nil {
  21. return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  22. }
  23. if common.DebugEnabled {
  24. println(string(responseBody))
  25. }
  26. // 解析为 Gemini 原生响应格式
  27. var geminiResponse dto.GeminiChatResponse
  28. err = common.Unmarshal(responseBody, &geminiResponse)
  29. if err != nil {
  30. return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  31. }
  32. // 计算使用量(基于 UsageMetadata)
  33. usage := dto.Usage{
  34. PromptTokens: geminiResponse.UsageMetadata.PromptTokenCount,
  35. CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount,
  36. TotalTokens: geminiResponse.UsageMetadata.TotalTokenCount,
  37. }
  38. usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
  39. for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
  40. if detail.Modality == "AUDIO" {
  41. usage.PromptTokensDetails.AudioTokens = detail.TokenCount
  42. } else if detail.Modality == "TEXT" {
  43. usage.PromptTokensDetails.TextTokens = detail.TokenCount
  44. }
  45. }
  46. service.IOCopyBytesGracefully(c, resp, responseBody)
  47. return &usage, nil
  48. }
  49. func NativeGeminiEmbeddingHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *types.NewAPIError) {
  50. defer service.CloseResponseBodyGracefully(resp)
  51. responseBody, err := io.ReadAll(resp.Body)
  52. if err != nil {
  53. return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  54. }
  55. if common.DebugEnabled {
  56. println(string(responseBody))
  57. }
  58. usage := &dto.Usage{
  59. PromptTokens: info.PromptTokens,
  60. TotalTokens: info.PromptTokens,
  61. }
  62. if info.IsGeminiBatchEmbedding {
  63. var geminiResponse dto.GeminiBatchEmbeddingResponse
  64. err = common.Unmarshal(responseBody, &geminiResponse)
  65. if err != nil {
  66. return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  67. }
  68. } else {
  69. var geminiResponse dto.GeminiEmbeddingResponse
  70. err = common.Unmarshal(responseBody, &geminiResponse)
  71. if err != nil {
  72. return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  73. }
  74. }
  75. service.IOCopyBytesGracefully(c, resp, responseBody)
  76. return usage, nil
  77. }
  78. func GeminiTextGenerationStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  79. var usage = &dto.Usage{}
  80. var imageCount int
  81. helper.SetEventStreamHeaders(c)
  82. responseText := strings.Builder{}
  83. helper.StreamScannerHandler(c, resp, info, func(data string) bool {
  84. var geminiResponse dto.GeminiChatResponse
  85. err := common.UnmarshalJsonStr(data, &geminiResponse)
  86. if err != nil {
  87. logger.LogError(c, "error unmarshalling stream response: "+err.Error())
  88. return false
  89. }
  90. // 统计图片数量
  91. for _, candidate := range geminiResponse.Candidates {
  92. for _, part := range candidate.Content.Parts {
  93. if part.InlineData != nil && part.InlineData.MimeType != "" {
  94. imageCount++
  95. }
  96. if part.Text != "" {
  97. responseText.WriteString(part.Text)
  98. }
  99. }
  100. }
  101. // 更新使用量统计
  102. if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
  103. usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
  104. usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount
  105. usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
  106. usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
  107. for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
  108. if detail.Modality == "AUDIO" {
  109. usage.PromptTokensDetails.AudioTokens = detail.TokenCount
  110. } else if detail.Modality == "TEXT" {
  111. usage.PromptTokensDetails.TextTokens = detail.TokenCount
  112. }
  113. }
  114. }
  115. // 直接发送 GeminiChatResponse 响应
  116. err = helper.StringData(c, data)
  117. if err != nil {
  118. logger.LogError(c, err.Error())
  119. }
  120. info.SendResponseCount++
  121. return true
  122. })
  123. if info.SendResponseCount == 0 {
  124. return nil, types.NewOpenAIError(errors.New("no response received from Gemini API"), types.ErrorCodeEmptyResponse, http.StatusInternalServerError)
  125. }
  126. if imageCount != 0 {
  127. if usage.CompletionTokens == 0 {
  128. usage.CompletionTokens = imageCount * 258
  129. }
  130. }
  131. // 如果usage.CompletionTokens为0,则使用本地统计的completion tokens
  132. if usage.CompletionTokens == 0 {
  133. str := responseText.String()
  134. if len(str) > 0 {
  135. usage = service.ResponseText2Usage(responseText.String(), info.UpstreamModelName, info.PromptTokens)
  136. } else {
  137. // 空补全,不需要使用量
  138. usage = &dto.Usage{}
  139. }
  140. }
  141. // 移除流式响应结尾的[Done],因为Gemini API没有发送Done的行为
  142. //helper.Done(c)
  143. return usage, nil
  144. }