// relay-gemini.go — OpenAI-to-Gemini relay adapter.
  1. package gemini
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/http"
  8. "strconv"
  9. "strings"
  10. "unicode/utf8"
  11. "github.com/QuantumNous/new-api/common"
  12. "github.com/QuantumNous/new-api/constant"
  13. "github.com/QuantumNous/new-api/dto"
  14. "github.com/QuantumNous/new-api/logger"
  15. "github.com/QuantumNous/new-api/relay/channel/openai"
  16. relaycommon "github.com/QuantumNous/new-api/relay/common"
  17. "github.com/QuantumNous/new-api/relay/helper"
  18. "github.com/QuantumNous/new-api/service"
  19. "github.com/QuantumNous/new-api/setting/model_setting"
  20. "github.com/QuantumNous/new-api/setting/reasoning"
  21. "github.com/QuantumNous/new-api/types"
  22. "github.com/gin-gonic/gin"
  23. )
  24. // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference?hl=zh-cn#blob
  25. var geminiSupportedMimeTypes = map[string]bool{
  26. "application/pdf": true,
  27. "audio/mpeg": true,
  28. "audio/mp3": true,
  29. "audio/wav": true,
  30. "image/png": true,
  31. "image/jpeg": true,
  32. "image/webp": true,
  33. "text/plain": true,
  34. "video/mov": true,
  35. "video/mpeg": true,
  36. "video/mp4": true,
  37. "video/mpg": true,
  38. "video/avi": true,
  39. "video/wmv": true,
  40. "video/mpegps": true,
  41. "video/flv": true,
  42. }
  43. const thoughtSignatureBypassValue = "context_engineering_is_the_way_to_go"
  44. // Gemini 允许的思考预算范围
  45. const (
  46. pro25MinBudget = 128
  47. pro25MaxBudget = 32768
  48. flash25MaxBudget = 24576
  49. flash25LiteMinBudget = 512
  50. flash25LiteMaxBudget = 24576
  51. )
  52. func isNew25ProModel(modelName string) bool {
  53. return strings.HasPrefix(modelName, "gemini-2.5-pro") &&
  54. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
  55. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
  56. }
  57. func is25FlashLiteModel(modelName string) bool {
  58. return strings.HasPrefix(modelName, "gemini-2.5-flash-lite")
  59. }
  60. // clampThinkingBudget 根据模型名称将预算限制在允许的范围内
  61. func clampThinkingBudget(modelName string, budget int) int {
  62. isNew25Pro := isNew25ProModel(modelName)
  63. is25FlashLite := is25FlashLiteModel(modelName)
  64. if is25FlashLite {
  65. if budget < flash25LiteMinBudget {
  66. return flash25LiteMinBudget
  67. }
  68. if budget > flash25LiteMaxBudget {
  69. return flash25LiteMaxBudget
  70. }
  71. } else if isNew25Pro {
  72. if budget < pro25MinBudget {
  73. return pro25MinBudget
  74. }
  75. if budget > pro25MaxBudget {
  76. return pro25MaxBudget
  77. }
  78. } else { // 其他模型
  79. if budget < 0 {
  80. return 0
  81. }
  82. if budget > flash25MaxBudget {
  83. return flash25MaxBudget
  84. }
  85. }
  86. return budget
  87. }
  88. // "effort": "high" - Allocates a large portion of tokens for reasoning (approximately 80% of max_tokens)
  89. // "effort": "medium" - Allocates a moderate portion of tokens (approximately 50% of max_tokens)
  90. // "effort": "low" - Allocates a smaller portion of tokens (approximately 20% of max_tokens)
  91. // "effort": "minimal" - Allocates a minimal portion of tokens (approximately 5% of max_tokens)
  92. func clampThinkingBudgetByEffort(modelName string, effort string) int {
  93. isNew25Pro := isNew25ProModel(modelName)
  94. is25FlashLite := is25FlashLiteModel(modelName)
  95. maxBudget := 0
  96. if is25FlashLite {
  97. maxBudget = flash25LiteMaxBudget
  98. }
  99. if isNew25Pro {
  100. maxBudget = pro25MaxBudget
  101. } else {
  102. maxBudget = flash25MaxBudget
  103. }
  104. switch effort {
  105. case "high":
  106. maxBudget = maxBudget * 80 / 100
  107. case "medium":
  108. maxBudget = maxBudget * 50 / 100
  109. case "low":
  110. maxBudget = maxBudget * 20 / 100
  111. case "minimal":
  112. maxBudget = maxBudget * 5 / 100
  113. }
  114. return clampThinkingBudget(modelName, maxBudget)
  115. }
  116. func ThinkingAdaptor(geminiRequest *dto.GeminiChatRequest, info *relaycommon.RelayInfo, oaiRequest ...dto.GeneralOpenAIRequest) {
  117. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
  118. modelName := info.UpstreamModelName
  119. isNew25Pro := strings.HasPrefix(modelName, "gemini-2.5-pro") &&
  120. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
  121. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
  122. if strings.Contains(modelName, "-thinking-") {
  123. parts := strings.SplitN(modelName, "-thinking-", 2)
  124. if len(parts) == 2 && parts[1] != "" {
  125. if budgetTokens, err := strconv.Atoi(parts[1]); err == nil {
  126. clampedBudget := clampThinkingBudget(modelName, budgetTokens)
  127. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  128. ThinkingBudget: common.GetPointer(clampedBudget),
  129. IncludeThoughts: true,
  130. }
  131. }
  132. }
  133. } else if strings.HasSuffix(modelName, "-thinking") {
  134. unsupportedModels := []string{
  135. "gemini-2.5-pro-preview-05-06",
  136. "gemini-2.5-pro-preview-03-25",
  137. }
  138. isUnsupported := false
  139. for _, unsupportedModel := range unsupportedModels {
  140. if strings.HasPrefix(modelName, unsupportedModel) {
  141. isUnsupported = true
  142. break
  143. }
  144. }
  145. if isUnsupported {
  146. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  147. IncludeThoughts: true,
  148. }
  149. } else {
  150. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  151. IncludeThoughts: true,
  152. }
  153. if geminiRequest.GenerationConfig.MaxOutputTokens > 0 {
  154. budgetTokens := model_setting.GetGeminiSettings().ThinkingAdapterBudgetTokensPercentage * float64(geminiRequest.GenerationConfig.MaxOutputTokens)
  155. clampedBudget := clampThinkingBudget(modelName, int(budgetTokens))
  156. geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampedBudget)
  157. } else {
  158. if len(oaiRequest) > 0 {
  159. // 如果有reasoningEffort参数,则根据其值设置思考预算
  160. geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampThinkingBudgetByEffort(modelName, oaiRequest[0].ReasoningEffort))
  161. }
  162. }
  163. }
  164. } else if strings.HasSuffix(modelName, "-nothinking") {
  165. if !isNew25Pro {
  166. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  167. ThinkingBudget: common.GetPointer(0),
  168. }
  169. }
  170. } else if _, level, ok := reasoning.TrimEffortSuffix(info.UpstreamModelName); ok && level != "" {
  171. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  172. IncludeThoughts: true,
  173. ThinkingLevel: level,
  174. }
  175. info.ReasoningEffort = level
  176. }
  177. }
  178. }
// Setting safety to the lowest possible values since Gemini is already powerless enough
//
// CovertOpenAI2Gemini converts an OpenAI-style chat completion request into a
// Gemini chat request: generation config, thinking config (from extra_body or
// the thinking adaptor), safety settings, tools, JSON response schema, and the
// full message history (system/tool/user/assistant; text, image, file and
// audio parts). Returns an error for malformed extra_body, unsupported media,
// or too many images.
func CovertOpenAI2Gemini(c *gin.Context, textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*dto.GeminiChatRequest, error) {
	geminiRequest := dto.GeminiChatRequest{
		Contents: make([]dto.GeminiChatContent, 0, len(textRequest.Messages)),
		GenerationConfig: dto.GeminiChatGenerationConfig{
			Temperature:     textRequest.Temperature,
			TopP:            textRequest.TopP,
			MaxOutputTokens: textRequest.GetMaxTokens(),
			Seed:            int64(textRequest.Seed),
		},
	}
	// Thought signatures are only attached for Gemini/Vertex channels and only
	// when enabled in settings.
	attachThoughtSignature := (info.ChannelType == constant.ChannelTypeGemini ||
		info.ChannelType == constant.ChannelTypeVertexAi) &&
		model_setting.GetGeminiSettings().FunctionCallThoughtSignatureEnabled
	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
		geminiRequest.GenerationConfig.ResponseModalities = []string{
			"TEXT",
			"IMAGE",
		}
	}
	adaptorWithExtraBody := false
	// patch extra_body
	if len(textRequest.ExtraBody) > 0 {
		if !strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
			var extraBody map[string]interface{}
			if err := common.Unmarshal(textRequest.ExtraBody, &extraBody); err != nil {
				return nil, fmt.Errorf("invalid extra body: %w", err)
			}
			// eg. {"google":{"thinking_config":{"thinking_budget":5324,"include_thoughts":true}}}
			if googleBody, ok := extraBody["google"].(map[string]interface{}); ok {
				adaptorWithExtraBody = true
				// check error param name like thinkingConfig, should be thinking_config
				if _, hasErrorParam := googleBody["thinkingConfig"]; hasErrorParam {
					return nil, errors.New("extra_body.google.thinkingConfig is not supported, use extra_body.google.thinking_config instead")
				}
				if thinkingConfig, ok := googleBody["thinking_config"].(map[string]interface{}); ok {
					// check error param name like thinkingBudget, should be thinking_budget
					if _, hasErrorParam := thinkingConfig["thinkingBudget"]; hasErrorParam {
						return nil, errors.New("extra_body.google.thinking_config.thinkingBudget is not supported, use extra_body.google.thinking_config.thinking_budget instead")
					}
					// JSON numbers unmarshal as float64; convert to int here.
					if budget, ok := thinkingConfig["thinking_budget"].(float64); ok {
						budgetInt := int(budget)
						geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
							ThinkingBudget:  common.GetPointer(budgetInt),
							IncludeThoughts: true,
						}
					} else {
						geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
							IncludeThoughts: true,
						}
					}
				}
				// check error param name like imageConfig, should be image_config
				if _, hasErrorParam := googleBody["imageConfig"]; hasErrorParam {
					return nil, errors.New("extra_body.google.imageConfig is not supported, use extra_body.google.image_config instead")
				}
				if imageConfig, ok := googleBody["image_config"].(map[string]interface{}); ok {
					// check error param name like aspectRatio, should be aspect_ratio
					if _, hasErrorParam := imageConfig["aspectRatio"]; hasErrorParam {
						return nil, errors.New("extra_body.google.image_config.aspectRatio is not supported, use extra_body.google.image_config.aspect_ratio instead")
					}
					// check error param name like imageSize, should be image_size
					if _, hasErrorParam := imageConfig["imageSize"]; hasErrorParam {
						return nil, errors.New("extra_body.google.image_config.imageSize is not supported, use extra_body.google.image_config.image_size instead")
					}
					// convert snake_case to camelCase for Gemini API
					geminiImageConfig := make(map[string]interface{})
					if aspectRatio, ok := imageConfig["aspect_ratio"]; ok {
						geminiImageConfig["aspectRatio"] = aspectRatio
					}
					if imageSize, ok := imageConfig["image_size"]; ok {
						geminiImageConfig["imageSize"] = imageSize
					}
					if len(geminiImageConfig) > 0 {
						imageConfigBytes, err := common.Marshal(geminiImageConfig)
						if err != nil {
							return nil, fmt.Errorf("failed to marshal image_config: %w", err)
						}
						geminiRequest.GenerationConfig.ImageConfig = imageConfigBytes
					}
				}
			}
		}
	}
	// Only run the suffix-based thinking adaptor when extra_body did not
	// already configure thinking explicitly.
	if !adaptorWithExtraBody {
		ThinkingAdaptor(&geminiRequest, info, textRequest)
	}
	safetySettings := make([]dto.GeminiChatSafetySettings, 0, len(SafetySettingList))
	for _, category := range SafetySettingList {
		safetySettings = append(safetySettings, dto.GeminiChatSafetySettings{
			Category:  category,
			Threshold: model_setting.GetGeminiSafetySetting(category),
		})
	}
	geminiRequest.SafetySettings = safetySettings
	// openaiContent.FuncToToolCalls()
	if textRequest.Tools != nil {
		functions := make([]dto.FunctionRequest, 0, len(textRequest.Tools))
		googleSearch := false
		codeExecution := false
		urlContext := false
		for _, tool := range textRequest.Tools {
			// The three pseudo-tools map to built-in Gemini tools, not
			// function declarations.
			if tool.Function.Name == "googleSearch" {
				googleSearch = true
				continue
			}
			if tool.Function.Name == "codeExecution" {
				codeExecution = true
				continue
			}
			if tool.Function.Name == "urlContext" {
				urlContext = true
				continue
			}
			// Drop an empty properties object entirely; Gemini rejects
			// object schemas with zero properties.
			if tool.Function.Parameters != nil {
				params, ok := tool.Function.Parameters.(map[string]interface{})
				if ok {
					if props, hasProps := params["properties"].(map[string]interface{}); hasProps {
						if len(props) == 0 {
							tool.Function.Parameters = nil
						}
					}
				}
			}
			// Clean the parameters before appending
			cleanedParams := cleanFunctionParameters(tool.Function.Parameters)
			tool.Function.Parameters = cleanedParams
			functions = append(functions, tool.Function)
		}
		geminiTools := geminiRequest.GetTools()
		if codeExecution {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				CodeExecution: make(map[string]string),
			})
		}
		if googleSearch {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				GoogleSearch: make(map[string]string),
			})
		}
		if urlContext {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				URLContext: make(map[string]string),
			})
		}
		if len(functions) > 0 {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				FunctionDeclarations: functions,
			})
		}
		geminiRequest.SetTools(geminiTools)
	}
	// Map OpenAI response_format to Gemini's JSON response MIME type/schema.
	if textRequest.ResponseFormat != nil && (textRequest.ResponseFormat.Type == "json_schema" || textRequest.ResponseFormat.Type == "json_object") {
		geminiRequest.GenerationConfig.ResponseMimeType = "application/json"
		if len(textRequest.ResponseFormat.JsonSchema) > 0 {
			// Parse the json.RawMessage schema first.
			var jsonSchema dto.FormatJsonSchema
			if err := common.Unmarshal(textRequest.ResponseFormat.JsonSchema, &jsonSchema); err == nil {
				cleanedSchema := removeAdditionalPropertiesWithDepth(jsonSchema.Schema, 0)
				geminiRequest.GenerationConfig.ResponseSchema = cleanedSchema
			}
		}
	}
	// Maps OpenAI tool_call ids to function names so later "tool" messages can
	// recover the function name for their FunctionResponse.
	tool_call_ids := make(map[string]string)
	var system_content []string
	//shouldAddDummyModelMessage := false
	for _, message := range textRequest.Messages {
		if message.Role == "system" || message.Role == "developer" {
			// System/developer messages are collected into systemInstruction.
			system_content = append(system_content, message.StringContent())
			continue
		} else if message.Role == "tool" || message.Role == "function" {
			// Tool results become FunctionResponse parts on a "user" content;
			// reuse the trailing user content if one is already open.
			if len(geminiRequest.Contents) == 0 || geminiRequest.Contents[len(geminiRequest.Contents)-1].Role == "model" {
				geminiRequest.Contents = append(geminiRequest.Contents, dto.GeminiChatContent{
					Role: "user",
				})
			}
			var parts = &geminiRequest.Contents[len(geminiRequest.Contents)-1].Parts
			name := ""
			if message.Name != nil {
				name = *message.Name
			} else if val, exists := tool_call_ids[message.ToolCallId]; exists {
				name = val
			}
			var contentMap map[string]interface{}
			contentStr := message.StringContent()
			// 1. Try to parse the content as a JSON object.
			if err := json.Unmarshal([]byte(contentStr), &contentMap); err != nil {
				// 2. On failure, try to parse it as a JSON array.
				var contentSlice []interface{}
				if err := json.Unmarshal([]byte(contentStr), &contentSlice); err == nil {
					// Arrays are wrapped in an object.
					contentMap = map[string]interface{}{"result": contentSlice}
				} else {
					// 3. Otherwise treat it as plain text.
					contentMap = map[string]interface{}{"content": contentStr}
				}
			}
			functionResp := &dto.GeminiFunctionResponse{
				Name:     name,
				Response: contentMap,
			}
			*parts = append(*parts, dto.GeminiPart{
				FunctionResponse: functionResp,
			})
			continue
		}
		var parts []dto.GeminiPart
		content := dto.GeminiChatContent{
			Role: message.Role,
		}
		// Signatures are only attached to assistant/model history messages.
		shouldAttachThoughtSignature := attachThoughtSignature && (message.Role == "assistant" || message.Role == "model")
		signatureAttached := false
		// isToolCall := false
		if message.ToolCalls != nil {
			// message.Role = "model"
			// isToolCall = true
			for _, call := range message.ParseToolCalls() {
				args := map[string]interface{}{}
				if call.Function.Arguments != "" {
					if json.Unmarshal([]byte(call.Function.Arguments), &args) != nil {
						return nil, fmt.Errorf("invalid arguments for function %s, args: %s", call.Function.Name, call.Function.Arguments)
					}
				}
				toolCall := dto.GeminiPart{
					FunctionCall: &dto.FunctionCall{
						FunctionName: call.Function.Name,
						Arguments:    args,
					},
				}
				// Attach the bypass signature to at most one function call
				// per message, and only when the call has real content.
				if shouldAttachThoughtSignature && !signatureAttached && hasFunctionCallContent(toolCall.FunctionCall) && len(toolCall.ThoughtSignature) == 0 {
					toolCall.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					signatureAttached = true
				}
				parts = append(parts, toolCall)
				tool_call_ids[call.ID] = call.Function.Name
			}
		}
		openaiContent := message.ParseContent()
		imageNum := 0
		for _, part := range openaiContent {
			if part.Type == dto.ContentTypeText {
				if part.Text == "" {
					continue
				}
				// check markdown image ![image](data:image/jpeg;base64,xxxxxxxxxxxx)
				// Uses string search instead of regex to avoid performance
				// problems on very large text.
				text := part.Text
				hasMarkdownImage := false
				for {
					// Quick check for a markdown image marker.
					startIdx := strings.Index(text, "![")
					if startIdx == -1 {
						break
					}
					// Find "](data:".
					bracketIdx := strings.Index(text[startIdx:], "](data:")
					if bracketIdx == -1 {
						break
					}
					bracketIdx += startIdx
					// Find the closing ")".
					closeIdx := strings.Index(text[bracketIdx+2:], ")")
					if closeIdx == -1 {
						break
					}
					closeIdx += bracketIdx + 2
					hasMarkdownImage = true
					// Emit the text preceding the image.
					if startIdx > 0 {
						textBefore := text[:startIdx]
						if textBefore != "" {
							parts = append(parts, dto.GeminiPart{
								Text: textBefore,
							})
						}
					}
					// Extract the data URL (after "](", before ")").
					dataUrl := text[bracketIdx+2 : closeIdx]
					imageNum += 1
					if constant.GeminiVisionMaxImageNum != -1 && imageNum > constant.GeminiVisionMaxImageNum {
						return nil, fmt.Errorf("too many images in the message, max allowed is %d", constant.GeminiVisionMaxImageNum)
					}
					format, base64String, err := service.DecodeBase64FileData(dataUrl)
					if err != nil {
						return nil, fmt.Errorf("decode markdown base64 image data failed: %s", err.Error())
					}
					imgPart := dto.GeminiPart{
						InlineData: &dto.GeminiInlineData{
							MimeType: format,
							Data:     base64String,
						},
					}
					if shouldAttachThoughtSignature {
						imgPart.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					}
					parts = append(parts, imgPart)
					// Continue scanning the remaining text.
					text = text[closeIdx+1:]
				}
				// Emit the original text when no markdown image was found.
				// NOTE(review): when hasMarkdownImage is true, any text left
				// over after the last image (the residual `text`) is dropped
				// here — confirm whether that is intentional.
				if !hasMarkdownImage {
					parts = append(parts, dto.GeminiPart{
						Text: part.Text,
					})
				}
			} else if part.Type == dto.ContentTypeImageURL {
				imageNum += 1
				if constant.GeminiVisionMaxImageNum != -1 && imageNum > constant.GeminiVisionMaxImageNum {
					return nil, fmt.Errorf("too many images in the message, max allowed is %d", constant.GeminiVisionMaxImageNum)
				}
				// Decide whether this is a remote URL or inline base64 data.
				if strings.HasPrefix(part.GetImageMedia().Url, "http") {
					// Remote URL: fetch the file's type and base64 data.
					fileData, err := service.GetFileBase64FromUrl(c, part.GetImageMedia().Url, "formatting image for Gemini")
					if err != nil {
						return nil, fmt.Errorf("get file base64 from url '%s' failed: %w", part.GetImageMedia().Url, err)
					}
					// Validate the MIME type against Gemini's whitelist.
					if _, ok := geminiSupportedMimeTypes[strings.ToLower(fileData.MimeType)]; !ok {
						url := part.GetImageMedia().Url
						return nil, fmt.Errorf("mime type is not supported by Gemini: '%s', url: '%s', supported types are: %v", fileData.MimeType, url, getSupportedMimeTypesList())
					}
					parts = append(parts, dto.GeminiPart{
						InlineData: &dto.GeminiInlineData{
							MimeType: fileData.MimeType, // keep the original MimeType: casing may be significant to the API
							Data:     fileData.Base64Data,
						},
					})
				} else {
					format, base64String, err := service.DecodeBase64FileData(part.GetImageMedia().Url)
					if err != nil {
						return nil, fmt.Errorf("decode base64 image data failed: %s", err.Error())
					}
					parts = append(parts, dto.GeminiPart{
						InlineData: &dto.GeminiInlineData{
							MimeType: format,
							Data:     base64String,
						},
					})
				}
			} else if part.Type == dto.ContentTypeFile {
				if part.GetFile().FileId != "" {
					return nil, fmt.Errorf("only base64 file is supported in gemini")
				}
				format, base64String, err := service.DecodeBase64FileData(part.GetFile().FileData)
				if err != nil {
					return nil, fmt.Errorf("decode base64 file data failed: %s", err.Error())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: format,
						Data:     base64String,
					},
				})
			} else if part.Type == dto.ContentTypeInputAudio {
				if part.GetInputAudio().Data == "" {
					return nil, fmt.Errorf("only base64 audio is supported in gemini")
				}
				base64String, err := service.DecodeBase64AudioData(part.GetInputAudio().Data)
				if err != nil {
					return nil, fmt.Errorf("decode base64 audio data failed: %s", err.Error())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: "audio/" + part.GetInputAudio().Format,
						Data:     base64String,
					},
				})
			}
		}
		// If a signature is required but was not attached yet (no tool_calls,
		// or the tool_calls were empty), attach the thoughtSignature to the
		// first text part.
		if shouldAttachThoughtSignature && !signatureAttached && len(parts) > 0 {
			for i := range parts {
				if parts[i].Text != "" {
					parts[i].ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					break
				}
			}
		}
		content.Parts = parts
		// there's no assistant role in gemini and API shall vomit if Role is not user or model
		if content.Role == "assistant" {
			content.Role = "model"
		}
		if len(content.Parts) > 0 {
			geminiRequest.Contents = append(geminiRequest.Contents, content)
		}
	}
	// Collected system/developer messages become a single systemInstruction.
	if len(system_content) > 0 {
		geminiRequest.SystemInstructions = &dto.GeminiChatContent{
			Parts: []dto.GeminiPart{
				{
					Text: strings.Join(system_content, "\n"),
				},
			},
		}
	}
	return &geminiRequest, nil
}
  579. func hasFunctionCallContent(call *dto.FunctionCall) bool {
  580. if call == nil {
  581. return false
  582. }
  583. if strings.TrimSpace(call.FunctionName) != "" {
  584. return true
  585. }
  586. switch v := call.Arguments.(type) {
  587. case nil:
  588. return false
  589. case string:
  590. return strings.TrimSpace(v) != ""
  591. case map[string]interface{}:
  592. return len(v) > 0
  593. case []interface{}:
  594. return len(v) > 0
  595. default:
  596. return true
  597. }
  598. }
  599. // Helper function to get a list of supported MIME types for error messages
  600. func getSupportedMimeTypesList() []string {
  601. keys := make([]string, 0, len(geminiSupportedMimeTypes))
  602. for k := range geminiSupportedMimeTypes {
  603. keys = append(keys, k)
  604. }
  605. return keys
  606. }
  607. // cleanFunctionParameters recursively removes unsupported fields from Gemini function parameters.
  608. func cleanFunctionParameters(params interface{}) interface{} {
  609. if params == nil {
  610. return nil
  611. }
  612. switch v := params.(type) {
  613. case map[string]interface{}:
  614. // Create a copy to avoid modifying the original
  615. cleanedMap := make(map[string]interface{})
  616. for k, val := range v {
  617. cleanedMap[k] = val
  618. }
  619. // Remove unsupported root-level fields
  620. delete(cleanedMap, "default")
  621. delete(cleanedMap, "exclusiveMaximum")
  622. delete(cleanedMap, "exclusiveMinimum")
  623. delete(cleanedMap, "$schema")
  624. delete(cleanedMap, "additionalProperties")
  625. // Check and clean 'format' for string types
  626. if propType, typeExists := cleanedMap["type"].(string); typeExists && propType == "string" {
  627. if formatValue, formatExists := cleanedMap["format"].(string); formatExists {
  628. if formatValue != "enum" && formatValue != "date-time" {
  629. delete(cleanedMap, "format")
  630. }
  631. }
  632. }
  633. // Clean properties
  634. if props, ok := cleanedMap["properties"].(map[string]interface{}); ok && props != nil {
  635. cleanedProps := make(map[string]interface{})
  636. for propName, propValue := range props {
  637. cleanedProps[propName] = cleanFunctionParameters(propValue)
  638. }
  639. cleanedMap["properties"] = cleanedProps
  640. }
  641. // Recursively clean items in arrays
  642. if items, ok := cleanedMap["items"].(map[string]interface{}); ok && items != nil {
  643. cleanedMap["items"] = cleanFunctionParameters(items)
  644. }
  645. // Also handle items if it's an array of schemas
  646. if itemsArray, ok := cleanedMap["items"].([]interface{}); ok {
  647. cleanedItemsArray := make([]interface{}, len(itemsArray))
  648. for i, item := range itemsArray {
  649. cleanedItemsArray[i] = cleanFunctionParameters(item)
  650. }
  651. cleanedMap["items"] = cleanedItemsArray
  652. }
  653. // Recursively clean other schema composition keywords
  654. for _, field := range []string{"allOf", "anyOf", "oneOf"} {
  655. if nested, ok := cleanedMap[field].([]interface{}); ok {
  656. cleanedNested := make([]interface{}, len(nested))
  657. for i, item := range nested {
  658. cleanedNested[i] = cleanFunctionParameters(item)
  659. }
  660. cleanedMap[field] = cleanedNested
  661. }
  662. }
  663. // Recursively clean patternProperties
  664. if patternProps, ok := cleanedMap["patternProperties"].(map[string]interface{}); ok {
  665. cleanedPatternProps := make(map[string]interface{})
  666. for pattern, schema := range patternProps {
  667. cleanedPatternProps[pattern] = cleanFunctionParameters(schema)
  668. }
  669. cleanedMap["patternProperties"] = cleanedPatternProps
  670. }
  671. // Recursively clean definitions
  672. if definitions, ok := cleanedMap["definitions"].(map[string]interface{}); ok {
  673. cleanedDefinitions := make(map[string]interface{})
  674. for defName, defSchema := range definitions {
  675. cleanedDefinitions[defName] = cleanFunctionParameters(defSchema)
  676. }
  677. cleanedMap["definitions"] = cleanedDefinitions
  678. }
  679. // Recursively clean $defs (newer JSON Schema draft)
  680. if defs, ok := cleanedMap["$defs"].(map[string]interface{}); ok {
  681. cleanedDefs := make(map[string]interface{})
  682. for defName, defSchema := range defs {
  683. cleanedDefs[defName] = cleanFunctionParameters(defSchema)
  684. }
  685. cleanedMap["$defs"] = cleanedDefs
  686. }
  687. // Clean conditional keywords
  688. for _, field := range []string{"if", "then", "else", "not"} {
  689. if nested, ok := cleanedMap[field]; ok {
  690. cleanedMap[field] = cleanFunctionParameters(nested)
  691. }
  692. }
  693. return cleanedMap
  694. case []interface{}:
  695. // Handle arrays of schemas
  696. cleanedArray := make([]interface{}, len(v))
  697. for i, item := range v {
  698. cleanedArray[i] = cleanFunctionParameters(item)
  699. }
  700. return cleanedArray
  701. default:
  702. // Not a map or array, return as is (e.g., could be a primitive)
  703. return params
  704. }
  705. }
  706. func removeAdditionalPropertiesWithDepth(schema interface{}, depth int) interface{} {
  707. if depth >= 5 {
  708. return schema
  709. }
  710. v, ok := schema.(map[string]interface{})
  711. if !ok || len(v) == 0 {
  712. return schema
  713. }
  714. // 删除所有的title字段
  715. delete(v, "title")
  716. delete(v, "$schema")
  717. // 如果type不为object和array,则直接返回
  718. if typeVal, exists := v["type"]; !exists || (typeVal != "object" && typeVal != "array") {
  719. return schema
  720. }
  721. switch v["type"] {
  722. case "object":
  723. delete(v, "additionalProperties")
  724. // 处理 properties
  725. if properties, ok := v["properties"].(map[string]interface{}); ok {
  726. for key, value := range properties {
  727. properties[key] = removeAdditionalPropertiesWithDepth(value, depth+1)
  728. }
  729. }
  730. for _, field := range []string{"allOf", "anyOf", "oneOf"} {
  731. if nested, ok := v[field].([]interface{}); ok {
  732. for i, item := range nested {
  733. nested[i] = removeAdditionalPropertiesWithDepth(item, depth+1)
  734. }
  735. }
  736. }
  737. case "array":
  738. if items, ok := v["items"].(map[string]interface{}); ok {
  739. v["items"] = removeAdditionalPropertiesWithDepth(items, depth+1)
  740. }
  741. }
  742. return v
  743. }
  744. func unescapeString(s string) (string, error) {
  745. var result []rune
  746. escaped := false
  747. i := 0
  748. for i < len(s) {
  749. r, size := utf8.DecodeRuneInString(s[i:]) // 正确解码UTF-8字符
  750. if r == utf8.RuneError {
  751. return "", fmt.Errorf("invalid UTF-8 encoding")
  752. }
  753. if escaped {
  754. // 如果是转义符后的字符,检查其类型
  755. switch r {
  756. case '"':
  757. result = append(result, '"')
  758. case '\\':
  759. result = append(result, '\\')
  760. case '/':
  761. result = append(result, '/')
  762. case 'b':
  763. result = append(result, '\b')
  764. case 'f':
  765. result = append(result, '\f')
  766. case 'n':
  767. result = append(result, '\n')
  768. case 'r':
  769. result = append(result, '\r')
  770. case 't':
  771. result = append(result, '\t')
  772. case '\'':
  773. result = append(result, '\'')
  774. default:
  775. // 如果遇到一个非法的转义字符,直接按原样输出
  776. result = append(result, '\\', r)
  777. }
  778. escaped = false
  779. } else {
  780. if r == '\\' {
  781. escaped = true // 记录反斜杠作为转义符
  782. } else {
  783. result = append(result, r)
  784. }
  785. }
  786. i += size // 移动到下一个字符
  787. }
  788. return string(result), nil
  789. }
  790. func unescapeMapOrSlice(data interface{}) interface{} {
  791. switch v := data.(type) {
  792. case map[string]interface{}:
  793. for k, val := range v {
  794. v[k] = unescapeMapOrSlice(val)
  795. }
  796. case []interface{}:
  797. for i, val := range v {
  798. v[i] = unescapeMapOrSlice(val)
  799. }
  800. case string:
  801. if unescaped, err := unescapeString(v); err != nil {
  802. return v
  803. } else {
  804. return unescaped
  805. }
  806. }
  807. return data
  808. }
  809. func getResponseToolCall(item *dto.GeminiPart) *dto.ToolCallResponse {
  810. var argsBytes []byte
  811. var err error
  812. if result, ok := item.FunctionCall.Arguments.(map[string]interface{}); ok {
  813. argsBytes, err = json.Marshal(unescapeMapOrSlice(result))
  814. } else {
  815. argsBytes, err = json.Marshal(item.FunctionCall.Arguments)
  816. }
  817. if err != nil {
  818. return nil
  819. }
  820. return &dto.ToolCallResponse{
  821. ID: fmt.Sprintf("call_%s", common.GetUUID()),
  822. Type: "function",
  823. Function: dto.FunctionResponse{
  824. Arguments: string(argsBytes),
  825. Name: item.FunctionCall.FunctionName,
  826. },
  827. }
  828. }
  829. func responseGeminiChat2OpenAI(c *gin.Context, response *dto.GeminiChatResponse) *dto.OpenAITextResponse {
  830. fullTextResponse := dto.OpenAITextResponse{
  831. Id: helper.GetResponseID(c),
  832. Object: "chat.completion",
  833. Created: common.GetTimestamp(),
  834. Choices: make([]dto.OpenAITextResponseChoice, 0, len(response.Candidates)),
  835. }
  836. isToolCall := false
  837. for _, candidate := range response.Candidates {
  838. choice := dto.OpenAITextResponseChoice{
  839. Index: int(candidate.Index),
  840. Message: dto.Message{
  841. Role: "assistant",
  842. Content: "",
  843. },
  844. FinishReason: constant.FinishReasonStop,
  845. }
  846. if len(candidate.Content.Parts) > 0 {
  847. var texts []string
  848. var toolCalls []dto.ToolCallResponse
  849. for _, part := range candidate.Content.Parts {
  850. if part.InlineData != nil {
  851. // 媒体内容
  852. if strings.HasPrefix(part.InlineData.MimeType, "image") {
  853. imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
  854. texts = append(texts, imgText)
  855. } else {
  856. // 其他媒体类型,直接显示链接
  857. texts = append(texts, fmt.Sprintf("[media](data:%s;base64,%s)", part.InlineData.MimeType, part.InlineData.Data))
  858. }
  859. } else if part.FunctionCall != nil {
  860. choice.FinishReason = constant.FinishReasonToolCalls
  861. if call := getResponseToolCall(&part); call != nil {
  862. toolCalls = append(toolCalls, *call)
  863. }
  864. } else if part.Thought {
  865. choice.Message.ReasoningContent = part.Text
  866. } else {
  867. if part.ExecutableCode != nil {
  868. texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```")
  869. } else if part.CodeExecutionResult != nil {
  870. texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```")
  871. } else {
  872. // 过滤掉空行
  873. if part.Text != "\n" {
  874. texts = append(texts, part.Text)
  875. }
  876. }
  877. }
  878. }
  879. if len(toolCalls) > 0 {
  880. choice.Message.SetToolCalls(toolCalls)
  881. isToolCall = true
  882. }
  883. choice.Message.SetStringContent(strings.Join(texts, "\n"))
  884. }
  885. if candidate.FinishReason != nil {
  886. switch *candidate.FinishReason {
  887. case "STOP":
  888. choice.FinishReason = constant.FinishReasonStop
  889. case "MAX_TOKENS":
  890. choice.FinishReason = constant.FinishReasonLength
  891. default:
  892. choice.FinishReason = constant.FinishReasonContentFilter
  893. }
  894. }
  895. if isToolCall {
  896. choice.FinishReason = constant.FinishReasonToolCalls
  897. }
  898. fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
  899. }
  900. return &fullTextResponse
  901. }
// streamResponseGeminiChat2OpenAI converts one streamed Gemini chunk into an
// OpenAI chat.completion.chunk. The second return value reports whether any
// candidate finished with "STOP"; the caller uses it to emit a stop chunk.
func streamResponseGeminiChat2OpenAI(geminiResponse *dto.GeminiChatResponse) (*dto.ChatCompletionsStreamResponse, bool) {
	choices := make([]dto.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates))
	isStop := false
	for _, candidate := range geminiResponse.Candidates {
		// A "STOP" finish reason is consumed here (set to nil) so this chunk
		// carries no finish reason; the caller sends a dedicated stop chunk.
		if candidate.FinishReason != nil && *candidate.FinishReason == "STOP" {
			isStop = true
			candidate.FinishReason = nil
		}
		choice := dto.ChatCompletionsStreamResponseChoice{
			Index: int(candidate.Index),
			Delta: dto.ChatCompletionsStreamResponseChoiceDelta{
				//Role: "assistant",
			},
		}
		var texts []string
		isTools := false
		isThought := false
		if candidate.FinishReason != nil {
			// p := GeminiConvertFinishReason(*candidate.FinishReason)
			// NOTE(review): the "STOP" case below is unreachable — STOP was
			// already nilled above — so only MAX_TOKENS/default can fire here.
			switch *candidate.FinishReason {
			case "STOP":
				choice.FinishReason = &constant.FinishReasonStop
			case "MAX_TOKENS":
				choice.FinishReason = &constant.FinishReasonLength
			default:
				choice.FinishReason = &constant.FinishReasonContentFilter
			}
		}
		for _, part := range candidate.Content.Parts {
			if part.InlineData != nil {
				// Inline images become markdown data URLs; other inline MIME
				// types are dropped in streaming mode.
				if strings.HasPrefix(part.InlineData.MimeType, "image") {
					imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
					texts = append(texts, imgText)
				}
			} else if part.FunctionCall != nil {
				isTools = true
				if call := getResponseToolCall(&part); call != nil {
					call.SetIndex(len(choice.Delta.ToolCalls))
					choice.Delta.ToolCalls = append(choice.Delta.ToolCalls, *call)
				}
			} else if part.Thought {
				isThought = true
				texts = append(texts, part.Text)
			} else {
				if part.ExecutableCode != nil {
					texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```\n")
				} else if part.CodeExecutionResult != nil {
					texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```\n")
				} else {
					// Skip bare newline parts.
					if part.Text != "\n" {
						texts = append(texts, part.Text)
					}
				}
			}
		}
		// NOTE(review): isThought is chunk-wide, so a chunk mixing thought and
		// normal parts routes all of its text to reasoning content.
		if isThought {
			choice.Delta.SetReasoningContent(strings.Join(texts, "\n"))
		} else {
			choice.Delta.SetContentString(strings.Join(texts, "\n"))
		}
		if isTools {
			// Tool calls override any finish reason computed above.
			choice.FinishReason = &constant.FinishReasonToolCalls
		}
		choices = append(choices, choice)
	}
	var response dto.ChatCompletionsStreamResponse
	response.Object = "chat.completion.chunk"
	response.Choices = choices
	return &response, isStop
}
  972. func handleStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
  973. streamData, err := common.Marshal(resp)
  974. if err != nil {
  975. return fmt.Errorf("failed to marshal stream response: %w", err)
  976. }
  977. err = openai.HandleStreamFormat(c, info, string(streamData), info.ChannelSetting.ForceFormat, info.ChannelSetting.ThinkingToContent)
  978. if err != nil {
  979. return fmt.Errorf("failed to handle stream format: %w", err)
  980. }
  981. return nil
  982. }
  983. func handleFinalStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
  984. streamData, err := common.Marshal(resp)
  985. if err != nil {
  986. return fmt.Errorf("failed to marshal stream response: %w", err)
  987. }
  988. openai.HandleFinalResponse(c, info, string(streamData), resp.Id, resp.Created, resp.Model, resp.GetSystemFingerprint(), resp.Usage, false)
  989. return nil
  990. }
// geminiStreamHandler drives the SSE scanner over a Gemini streaming response,
// accumulating usage (prompt/completion/reasoning tokens plus modality
// details) and passing each decoded chunk to callback.
// The callback returns false to stop scanning.
func geminiStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response, callback func(data string, geminiResponse *dto.GeminiChatResponse) bool) (*dto.Usage, *types.NewAPIError) {
	var usage = &dto.Usage{}
	var imageCount int
	responseText := strings.Builder{}
	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
		var geminiResponse dto.GeminiChatResponse
		err := common.UnmarshalJsonStr(data, &geminiResponse)
		if err != nil {
			logger.LogError(c, "error unmarshalling stream response: "+err.Error())
			return false
		}
		// Count generated images and collect text for the fallback estimate below.
		for _, candidate := range geminiResponse.Candidates {
			for _, part := range candidate.Content.Parts {
				if part.InlineData != nil && part.InlineData.MimeType != "" {
					imageCount++
				}
				if part.Text != "" {
					responseText.WriteString(part.Text)
				}
			}
		}
		// Update usage from the latest chunk that carries usage metadata.
		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount
			usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
			for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
				if detail.Modality == "AUDIO" {
					usage.PromptTokensDetails.AudioTokens = detail.TokenCount
				} else if detail.Modality == "TEXT" {
					usage.PromptTokensDetails.TextTokens = detail.TokenCount
				}
			}
		}
		return callback(data, &geminiResponse)
	})
	// Fallback: bill generated images at a flat 1400 completion tokens each
	// when upstream reported no completion tokens.
	if imageCount != 0 {
		if usage.CompletionTokens == 0 {
			usage.CompletionTokens = imageCount * 1400
		}
	}
	// NOTE(review): this unconditionally overwrites the per-modality TextTokens
	// collected above with the full prompt count — confirm intended.
	usage.PromptTokensDetails.TextTokens = usage.PromptTokens
	// NOTE(review): when upstream reported a total, this also overrides the
	// image fallback set just above.
	if usage.TotalTokens > 0 {
		usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	}
	// Last resort: estimate completion tokens from the accumulated text.
	if usage.CompletionTokens <= 0 {
		str := responseText.String()
		if len(str) > 0 {
			usage = service.ResponseText2Usage(c, responseText.String(), info.UpstreamModelName, info.GetEstimatePromptTokens())
		} else {
			usage = &dto.Usage{}
		}
	}
	return usage, nil
}
// GeminiChatStreamHandler streams a Gemini chat response to the client in
// OpenAI format: an opening empty delta on the first chunk, per-chunk deltas,
// a stop chunk when upstream reports STOP, and a final usage chunk.
func GeminiChatStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	id := helper.GetResponseID(c)
	createAt := common.GetTimestamp()
	finishReason := constant.FinishReasonStop
	usage, err := geminiStreamHandler(c, info, resp, func(data string, geminiResponse *dto.GeminiChatResponse) bool {
		response, isStop := streamResponseGeminiChat2OpenAI(geminiResponse)
		response.Id = id
		response.Created = createAt
		response.Model = info.UpstreamModelName
		logger.LogDebug(c, fmt.Sprintf("info.SendResponseCount = %d", info.SendResponseCount))
		if info.SendResponseCount == 0 {
			// send first response
			emptyResponse := helper.GenerateStartEmptyResponse(id, createAt, info.UpstreamModelName, nil)
			if response.IsToolCall() {
				// Announce the tool call (id/name with empty arguments) in the
				// opening chunk, then clear tool calls and the finish reason
				// from the first real chunk.
				if len(emptyResponse.Choices) > 0 && len(response.Choices) > 0 {
					toolCalls := response.Choices[0].Delta.ToolCalls
					copiedToolCalls := make([]dto.ToolCallResponse, len(toolCalls))
					for idx := range toolCalls {
						copiedToolCalls[idx] = toolCalls[idx]
						copiedToolCalls[idx].Function.Arguments = ""
					}
					emptyResponse.Choices[0].Delta.ToolCalls = copiedToolCalls
				}
				finishReason = constant.FinishReasonToolCalls
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
				// NOTE(review): the first chunk's tool-call arguments are
				// dropped here — verify later chunks re-deliver them.
				response.ClearToolCalls()
				if response.IsFinished() {
					response.Choices[0].FinishReason = nil
				}
			} else {
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
			}
		}
		err := handleStream(c, info, response)
		if err != nil {
			logger.LogError(c, err.Error())
		}
		if isStop {
			// Upstream signalled STOP: emit a dedicated stop chunk.
			_ = handleStream(c, info, helper.GenerateStopResponse(id, createAt, info.UpstreamModelName, finishReason))
		}
		return true
	})
	if err != nil {
		return usage, err
	}
	// Close the stream with a final usage chunk.
	response := helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
	handleErr := handleFinalStream(c, info, response)
	if handleErr != nil {
		common.SysLog("send final response failed: " + handleErr.Error())
	}
	return usage, nil
}
// GeminiChatHandler handles a non-streaming Gemini chat response: converts it
// to OpenAI format, derives usage from the Gemini usage metadata, and writes
// the body in the format the client requested (OpenAI / Claude / Gemini).
func GeminiChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	service.CloseResponseBodyGracefully(resp)
	if common.DebugEnabled {
		println(string(responseBody))
	}
	var geminiResponse dto.GeminiChatResponse
	err = common.Unmarshal(responseBody, &geminiResponse)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	// NOTE(review): empty-candidate handling is currently disabled; an empty
	// response falls through and yields a choiceless OpenAI response.
	if len(geminiResponse.Candidates) == 0 {
		//return nil, types.NewOpenAIError(errors.New("no candidates returned"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
		//if geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
		//	return nil, types.NewOpenAIError(errors.New("request blocked by Gemini API: "+*geminiResponse.PromptFeedback.BlockReason), types.ErrorCodePromptBlocked, http.StatusBadRequest)
		//} else {
		//	return nil, types.NewOpenAIError(errors.New("empty response from Gemini API"), types.ErrorCodeEmptyResponse, http.StatusInternalServerError)
		//}
	}
	fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
	fullTextResponse.Model = info.UpstreamModelName
	usage := dto.Usage{
		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,
		TotalTokens:      geminiResponse.UsageMetadata.TotalTokenCount,
	}
	usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
	// NOTE(review): this overwrites the CandidatesTokenCount assigned above —
	// completion is effectively total minus prompt (folding in thought tokens).
	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
		if detail.Modality == "AUDIO" {
			usage.PromptTokensDetails.AudioTokens = detail.TokenCount
		} else if detail.Modality == "TEXT" {
			usage.PromptTokensDetails.TextTokens = detail.TokenCount
		}
	}
	fullTextResponse.Usage = usage
	// Re-encode the body for the client's relay format; RelayFormatGemini
	// passes the upstream body through unchanged.
	switch info.RelayFormat {
	case types.RelayFormatOpenAI:
		responseBody, err = common.Marshal(fullTextResponse)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
	case types.RelayFormatClaude:
		claudeResp := service.ResponseOpenAI2Claude(fullTextResponse, info)
		claudeRespStr, err := common.Marshal(claudeResp)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
		responseBody = claudeRespStr
	case types.RelayFormatGemini:
		break
	}
	service.IOCopyBytesGracefully(c, resp, responseBody)
	return &usage, nil
}
  1164. func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  1165. defer service.CloseResponseBodyGracefully(resp)
  1166. responseBody, readErr := io.ReadAll(resp.Body)
  1167. if readErr != nil {
  1168. return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1169. }
  1170. var geminiResponse dto.GeminiBatchEmbeddingResponse
  1171. if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  1172. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1173. }
  1174. // convert to openai format response
  1175. openAIResponse := dto.OpenAIEmbeddingResponse{
  1176. Object: "list",
  1177. Data: make([]dto.OpenAIEmbeddingResponseItem, 0, len(geminiResponse.Embeddings)),
  1178. Model: info.UpstreamModelName,
  1179. }
  1180. for i, embedding := range geminiResponse.Embeddings {
  1181. openAIResponse.Data = append(openAIResponse.Data, dto.OpenAIEmbeddingResponseItem{
  1182. Object: "embedding",
  1183. Embedding: embedding.Values,
  1184. Index: i,
  1185. })
  1186. }
  1187. // calculate usage
  1188. // https://ai.google.dev/gemini-api/docs/pricing?hl=zh-cn#text-embedding-004
  1189. // Google has not yet clarified how embedding models will be billed
  1190. // refer to openai billing method to use input tokens billing
  1191. // https://platform.openai.com/docs/guides/embeddings#what-are-embeddings
  1192. usage := service.ResponseText2Usage(c, "", info.UpstreamModelName, info.GetEstimatePromptTokens())
  1193. openAIResponse.Usage = *usage
  1194. jsonResponse, jsonErr := common.Marshal(openAIResponse)
  1195. if jsonErr != nil {
  1196. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1197. }
  1198. service.IOCopyBytesGracefully(c, resp, jsonResponse)
  1199. return usage, nil
  1200. }
  1201. func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  1202. responseBody, readErr := io.ReadAll(resp.Body)
  1203. if readErr != nil {
  1204. return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1205. }
  1206. _ = resp.Body.Close()
  1207. var geminiResponse dto.GeminiImageResponse
  1208. if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  1209. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1210. }
  1211. if len(geminiResponse.Predictions) == 0 {
  1212. return nil, types.NewOpenAIError(errors.New("no images generated"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1213. }
  1214. // convert to openai format response
  1215. openAIResponse := dto.ImageResponse{
  1216. Created: common.GetTimestamp(),
  1217. Data: make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
  1218. }
  1219. for _, prediction := range geminiResponse.Predictions {
  1220. if prediction.RaiFilteredReason != "" {
  1221. continue // skip filtered image
  1222. }
  1223. openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
  1224. B64Json: prediction.BytesBase64Encoded,
  1225. })
  1226. }
  1227. jsonResponse, jsonErr := json.Marshal(openAIResponse)
  1228. if jsonErr != nil {
  1229. return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
  1230. }
  1231. c.Writer.Header().Set("Content-Type", "application/json")
  1232. c.Writer.WriteHeader(resp.StatusCode)
  1233. _, _ = c.Writer.Write(jsonResponse)
  1234. // https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
  1235. // each image has fixed 258 tokens
  1236. const imageTokens = 258
  1237. generatedImages := len(openAIResponse.Data)
  1238. usage := &dto.Usage{
  1239. PromptTokens: imageTokens * generatedImages, // each generated image has fixed 258 tokens
  1240. CompletionTokens: 0, // image generation does not calculate completion tokens
  1241. TotalTokens: imageTokens * generatedImages,
  1242. }
  1243. return usage, nil
  1244. }