relay-gemini.go 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697
  1. package gemini
  2. import (
  3. "context"
  4. "encoding/json"
  5. "errors"
  6. "fmt"
  7. "io"
  8. "net/http"
  9. "strconv"
  10. "strings"
  11. "time"
  12. "unicode/utf8"
  13. "github.com/QuantumNous/new-api/common"
  14. "github.com/QuantumNous/new-api/constant"
  15. "github.com/QuantumNous/new-api/dto"
  16. "github.com/QuantumNous/new-api/logger"
  17. "github.com/QuantumNous/new-api/relay/channel/openai"
  18. relaycommon "github.com/QuantumNous/new-api/relay/common"
  19. "github.com/QuantumNous/new-api/relay/helper"
  20. "github.com/QuantumNous/new-api/service"
  21. "github.com/QuantumNous/new-api/setting/model_setting"
  22. "github.com/QuantumNous/new-api/setting/reasoning"
  23. "github.com/QuantumNous/new-api/types"
  24. "github.com/gin-gonic/gin"
  25. )
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference?hl=zh-cn#blob
//
// geminiSupportedMimeTypes is the whitelist of inline-data MIME types accepted
// by Gemini. Lookups are done on the lower-cased MIME type (see the image-URL
// handling in CovertOpenAI2Gemini); requests with other types are rejected
// before being sent upstream.
var geminiSupportedMimeTypes = map[string]bool{
	"application/pdf": true,
	"audio/mpeg":      true,
	"audio/mp3":       true,
	"audio/wav":       true,
	"image/png":       true,
	"image/jpeg":      true,
	"image/jpg":       true, // support old image/jpeg
	"image/webp":      true,
	"text/plain":      true,
	"video/mov":       true,
	"video/mpeg":      true,
	"video/mp4":       true,
	"video/mpg":       true,
	"video/avi":       true,
	"video/wmv":       true,
	"video/mpegps":    true,
	"video/flv":       true,
}
  46. const thoughtSignatureBypassValue = "context_engineering_is_the_way_to_go"
// Allowed thinking-budget ranges per Gemini model family.
// These bounds are applied by clampThinkingBudget.
const (
	pro25MinBudget       = 128   // minimum for new gemini-2.5-pro models
	pro25MaxBudget       = 32768 // maximum for new gemini-2.5-pro models
	flash25MaxBudget     = 24576 // maximum for other (flash-class) models; minimum is 0
	flash25LiteMinBudget = 512   // minimum for gemini-2.5-flash-lite
	flash25LiteMaxBudget = 24576 // maximum for gemini-2.5-flash-lite
)
  55. func isNew25ProModel(modelName string) bool {
  56. return strings.HasPrefix(modelName, "gemini-2.5-pro") &&
  57. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
  58. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
  59. }
  60. func is25FlashLiteModel(modelName string) bool {
  61. return strings.HasPrefix(modelName, "gemini-2.5-flash-lite")
  62. }
  63. // clampThinkingBudget 根据模型名称将预算限制在允许的范围内
  64. func clampThinkingBudget(modelName string, budget int) int {
  65. isNew25Pro := isNew25ProModel(modelName)
  66. is25FlashLite := is25FlashLiteModel(modelName)
  67. if is25FlashLite {
  68. if budget < flash25LiteMinBudget {
  69. return flash25LiteMinBudget
  70. }
  71. if budget > flash25LiteMaxBudget {
  72. return flash25LiteMaxBudget
  73. }
  74. } else if isNew25Pro {
  75. if budget < pro25MinBudget {
  76. return pro25MinBudget
  77. }
  78. if budget > pro25MaxBudget {
  79. return pro25MaxBudget
  80. }
  81. } else { // 其他模型
  82. if budget < 0 {
  83. return 0
  84. }
  85. if budget > flash25MaxBudget {
  86. return flash25MaxBudget
  87. }
  88. }
  89. return budget
  90. }
  91. // "effort": "high" - Allocates a large portion of tokens for reasoning (approximately 80% of max_tokens)
  92. // "effort": "medium" - Allocates a moderate portion of tokens (approximately 50% of max_tokens)
  93. // "effort": "low" - Allocates a smaller portion of tokens (approximately 20% of max_tokens)
  94. // "effort": "minimal" - Allocates a minimal portion of tokens (approximately 5% of max_tokens)
  95. func clampThinkingBudgetByEffort(modelName string, effort string) int {
  96. isNew25Pro := isNew25ProModel(modelName)
  97. is25FlashLite := is25FlashLiteModel(modelName)
  98. maxBudget := 0
  99. if is25FlashLite {
  100. maxBudget = flash25LiteMaxBudget
  101. }
  102. if isNew25Pro {
  103. maxBudget = pro25MaxBudget
  104. } else {
  105. maxBudget = flash25MaxBudget
  106. }
  107. switch effort {
  108. case "high":
  109. maxBudget = maxBudget * 80 / 100
  110. case "medium":
  111. maxBudget = maxBudget * 50 / 100
  112. case "low":
  113. maxBudget = maxBudget * 20 / 100
  114. case "minimal":
  115. maxBudget = maxBudget * 5 / 100
  116. }
  117. return clampThinkingBudget(modelName, maxBudget)
  118. }
  119. func ThinkingAdaptor(geminiRequest *dto.GeminiChatRequest, info *relaycommon.RelayInfo, oaiRequest ...dto.GeneralOpenAIRequest) {
  120. if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
  121. modelName := info.UpstreamModelName
  122. isNew25Pro := strings.HasPrefix(modelName, "gemini-2.5-pro") &&
  123. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
  124. !strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
  125. if strings.Contains(modelName, "-thinking-") {
  126. parts := strings.SplitN(modelName, "-thinking-", 2)
  127. if len(parts) == 2 && parts[1] != "" {
  128. if budgetTokens, err := strconv.Atoi(parts[1]); err == nil {
  129. clampedBudget := clampThinkingBudget(modelName, budgetTokens)
  130. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  131. ThinkingBudget: common.GetPointer(clampedBudget),
  132. IncludeThoughts: true,
  133. }
  134. }
  135. }
  136. } else if strings.HasSuffix(modelName, "-thinking") {
  137. unsupportedModels := []string{
  138. "gemini-2.5-pro-preview-05-06",
  139. "gemini-2.5-pro-preview-03-25",
  140. }
  141. isUnsupported := false
  142. for _, unsupportedModel := range unsupportedModels {
  143. if strings.HasPrefix(modelName, unsupportedModel) {
  144. isUnsupported = true
  145. break
  146. }
  147. }
  148. if isUnsupported {
  149. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  150. IncludeThoughts: true,
  151. }
  152. } else {
  153. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  154. IncludeThoughts: true,
  155. }
  156. if geminiRequest.GenerationConfig.MaxOutputTokens > 0 {
  157. budgetTokens := model_setting.GetGeminiSettings().ThinkingAdapterBudgetTokensPercentage * float64(geminiRequest.GenerationConfig.MaxOutputTokens)
  158. clampedBudget := clampThinkingBudget(modelName, int(budgetTokens))
  159. geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampedBudget)
  160. } else {
  161. if len(oaiRequest) > 0 {
  162. // 如果有reasoningEffort参数,则根据其值设置思考预算
  163. geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampThinkingBudgetByEffort(modelName, oaiRequest[0].ReasoningEffort))
  164. }
  165. }
  166. }
  167. } else if strings.HasSuffix(modelName, "-nothinking") {
  168. if !isNew25Pro {
  169. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  170. ThinkingBudget: common.GetPointer(0),
  171. }
  172. }
  173. } else if _, level, ok := reasoning.TrimEffortSuffix(info.UpstreamModelName); ok && level != "" {
  174. geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
  175. IncludeThoughts: true,
  176. ThinkingLevel: level,
  177. }
  178. info.ReasoningEffort = level
  179. }
  180. }
  181. }
// Setting safety to the lowest possible values since Gemini is already powerless enough
//
// CovertOpenAI2Gemini converts an OpenAI-style chat completion request into a
// Gemini chat request. It maps generation parameters, stop sequences,
// extra_body overrides (thinking_config / image_config), safety settings,
// tools and tool_choice, response_format, and the message list (including
// tool results, inline markdown images, image URLs, files and audio).
// Returns an error when the request contains invalid or unsupported content.
func CovertOpenAI2Gemini(c *gin.Context, textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*dto.GeminiChatRequest, error) {
	geminiRequest := dto.GeminiChatRequest{
		Contents: make([]dto.GeminiChatContent, 0, len(textRequest.Messages)),
		GenerationConfig: dto.GeminiChatGenerationConfig{
			Temperature:     textRequest.Temperature,
			TopP:            textRequest.TopP,
			MaxOutputTokens: textRequest.GetMaxTokens(),
			Seed:            int64(textRequest.Seed),
		},
	}
	// Synthetic thought signatures are only attached on native Gemini/Vertex
	// channels and only when enabled in the Gemini settings.
	attachThoughtSignature := (info.ChannelType == constant.ChannelTypeGemini ||
		info.ChannelType == constant.ChannelTypeVertexAi) &&
		model_setting.GetGeminiSettings().FunctionCallThoughtSignatureEnabled
	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
		geminiRequest.GenerationConfig.ResponseModalities = []string{
			"TEXT",
			"IMAGE",
		}
	}
	if stopSequences := parseStopSequences(textRequest.Stop); len(stopSequences) > 0 {
		// Gemini supports up to 5 stop sequences
		if len(stopSequences) > 5 {
			stopSequences = stopSequences[:5]
		}
		geminiRequest.GenerationConfig.StopSequences = stopSequences
	}
	adaptorWithExtraBody := false
	// patch extra_body; "-nothinking" models ignore extra_body thinking overrides.
	if len(textRequest.ExtraBody) > 0 {
		if !strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
			var extraBody map[string]interface{}
			if err := common.Unmarshal(textRequest.ExtraBody, &extraBody); err != nil {
				return nil, fmt.Errorf("invalid extra body: %w", err)
			}
			// eg. {"google":{"thinking_config":{"thinking_budget":5324,"include_thoughts":true}}}
			if googleBody, ok := extraBody["google"].(map[string]interface{}); ok {
				// A google extra_body takes over thinking configuration, so the
				// ThinkingAdaptor below is skipped.
				adaptorWithExtraBody = true
				// check error param name like thinkingConfig, should be thinking_config
				if _, hasErrorParam := googleBody["thinkingConfig"]; hasErrorParam {
					return nil, errors.New("extra_body.google.thinkingConfig is not supported, use extra_body.google.thinking_config instead")
				}
				if thinkingConfig, ok := googleBody["thinking_config"].(map[string]interface{}); ok {
					// check error param name like thinkingBudget, should be thinking_budget
					if _, hasErrorParam := thinkingConfig["thinkingBudget"]; hasErrorParam {
						return nil, errors.New("extra_body.google.thinking_config.thinkingBudget is not supported, use extra_body.google.thinking_config.thinking_budget instead")
					}
					// JSON numbers decode to float64; truncate to int for the budget.
					if budget, ok := thinkingConfig["thinking_budget"].(float64); ok {
						budgetInt := int(budget)
						geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
							ThinkingBudget:  common.GetPointer(budgetInt),
							IncludeThoughts: true,
						}
					} else {
						geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
							IncludeThoughts: true,
						}
					}
				}
				// check error param name like imageConfig, should be image_config
				if _, hasErrorParam := googleBody["imageConfig"]; hasErrorParam {
					return nil, errors.New("extra_body.google.imageConfig is not supported, use extra_body.google.image_config instead")
				}
				if imageConfig, ok := googleBody["image_config"].(map[string]interface{}); ok {
					// check error param name like aspectRatio, should be aspect_ratio
					if _, hasErrorParam := imageConfig["aspectRatio"]; hasErrorParam {
						return nil, errors.New("extra_body.google.image_config.aspectRatio is not supported, use extra_body.google.image_config.aspect_ratio instead")
					}
					// check error param name like imageSize, should be image_size
					if _, hasErrorParam := imageConfig["imageSize"]; hasErrorParam {
						return nil, errors.New("extra_body.google.image_config.imageSize is not supported, use extra_body.google.image_config.image_size instead")
					}
					// convert snake_case to camelCase for Gemini API
					geminiImageConfig := make(map[string]interface{})
					if aspectRatio, ok := imageConfig["aspect_ratio"]; ok {
						geminiImageConfig["aspectRatio"] = aspectRatio
					}
					if imageSize, ok := imageConfig["image_size"]; ok {
						geminiImageConfig["imageSize"] = imageSize
					}
					if len(geminiImageConfig) > 0 {
						imageConfigBytes, err := common.Marshal(geminiImageConfig)
						if err != nil {
							return nil, fmt.Errorf("failed to marshal image_config: %w", err)
						}
						geminiRequest.GenerationConfig.ImageConfig = imageConfigBytes
					}
				}
			}
		}
	}
	if !adaptorWithExtraBody {
		ThinkingAdaptor(&geminiRequest, info, textRequest)
	}
	// Apply configured safety thresholds for every category in SafetySettingList.
	safetySettings := make([]dto.GeminiChatSafetySettings, 0, len(SafetySettingList))
	for _, category := range SafetySettingList {
		safetySettings = append(safetySettings, dto.GeminiChatSafetySettings{
			Category:  category,
			Threshold: model_setting.GetGeminiSafetySetting(category),
		})
	}
	geminiRequest.SafetySettings = safetySettings
	// openaiContent.FuncToToolCalls()
	if textRequest.Tools != nil {
		functions := make([]dto.FunctionRequest, 0, len(textRequest.Tools))
		googleSearch := false
		codeExecution := false
		urlContext := false
		for _, tool := range textRequest.Tools {
			// The pseudo-functions below are routed to Gemini built-in tools
			// instead of function declarations.
			if tool.Function.Name == "googleSearch" {
				googleSearch = true
				continue
			}
			if tool.Function.Name == "codeExecution" {
				codeExecution = true
				continue
			}
			if tool.Function.Name == "urlContext" {
				urlContext = true
				continue
			}
			// Drop empty "properties" objects entirely — an empty parameter
			// schema is represented as nil.
			if tool.Function.Parameters != nil {
				params, ok := tool.Function.Parameters.(map[string]interface{})
				if ok {
					if props, hasProps := params["properties"].(map[string]interface{}); hasProps {
						if len(props) == 0 {
							tool.Function.Parameters = nil
						}
					}
				}
			}
			// Clean the parameters before appending
			cleanedParams := cleanFunctionParameters(tool.Function.Parameters)
			tool.Function.Parameters = cleanedParams
			functions = append(functions, tool.Function)
		}
		geminiTools := geminiRequest.GetTools()
		if codeExecution {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				CodeExecution: make(map[string]string),
			})
		}
		if googleSearch {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				GoogleSearch: make(map[string]string),
			})
		}
		if urlContext {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				URLContext: make(map[string]string),
			})
		}
		if len(functions) > 0 {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				FunctionDeclarations: functions,
			})
		}
		geminiRequest.SetTools(geminiTools)
		// [NEW] Convert OpenAI tool_choice to Gemini toolConfig.functionCallingConfig
		// Mapping: "auto" -> "AUTO", "none" -> "NONE", "required" -> "ANY"
		// Object format: {"type": "function", "function": {"name": "xxx"}} -> "ANY" + allowedFunctionNames
		if textRequest.ToolChoice != nil {
			geminiRequest.ToolConfig = convertToolChoiceToGeminiConfig(textRequest.ToolChoice)
		}
	}
	if textRequest.ResponseFormat != nil && (textRequest.ResponseFormat.Type == "json_schema" || textRequest.ResponseFormat.Type == "json_object") {
		geminiRequest.GenerationConfig.ResponseMimeType = "application/json"
		if len(textRequest.ResponseFormat.JsonSchema) > 0 {
			// Decode the raw json_schema payload first; on failure the schema
			// is silently omitted and only the MIME type is set.
			var jsonSchema dto.FormatJsonSchema
			if err := common.Unmarshal(textRequest.ResponseFormat.JsonSchema, &jsonSchema); err == nil {
				cleanedSchema := removeAdditionalPropertiesWithDepth(jsonSchema.Schema, 0)
				geminiRequest.GenerationConfig.ResponseSchema = cleanedSchema
			}
		}
	}
	// Map of tool-call ID -> function name, so later "tool" messages can be
	// matched back to the function they answer.
	tool_call_ids := make(map[string]string)
	var system_content []string
	//shouldAddDummyModelMessage := false
	for _, message := range textRequest.Messages {
		if message.Role == "system" || message.Role == "developer" {
			// System/developer messages are collected and emitted once as
			// systemInstructions after the loop.
			system_content = append(system_content, message.StringContent())
			continue
		} else if message.Role == "tool" || message.Role == "function" {
			// Tool results must live in a "user" turn; start one if the last
			// content is missing or was a model turn.
			if len(geminiRequest.Contents) == 0 || geminiRequest.Contents[len(geminiRequest.Contents)-1].Role == "model" {
				geminiRequest.Contents = append(geminiRequest.Contents, dto.GeminiChatContent{
					Role: "user",
				})
			}
			var parts = &geminiRequest.Contents[len(geminiRequest.Contents)-1].Parts
			name := ""
			if message.Name != nil {
				name = *message.Name
			} else if val, exists := tool_call_ids[message.ToolCallId]; exists {
				name = val
			}
			var contentMap map[string]interface{}
			contentStr := message.StringContent()
			// 1. Try to parse the content as a JSON object.
			if err := json.Unmarshal([]byte(contentStr), &contentMap); err != nil {
				// 2. On failure, try to parse it as a JSON array.
				var contentSlice []interface{}
				if err := json.Unmarshal([]byte(contentStr), &contentSlice); err == nil {
					// Wrap the array in an object.
					contentMap = map[string]interface{}{"result": contentSlice}
				} else {
					// 3. Otherwise treat it as plain text.
					contentMap = map[string]interface{}{"content": contentStr}
				}
			}
			functionResp := &dto.GeminiFunctionResponse{
				Name:     name,
				Response: contentMap,
			}
			*parts = append(*parts, dto.GeminiPart{
				FunctionResponse: functionResp,
			})
			continue
		}
		var parts []dto.GeminiPart
		content := dto.GeminiChatContent{
			Role: message.Role,
		}
		// Signatures are only attached to assistant/model turns, and at most
		// one part per message gets one (tracked by signatureAttached).
		shouldAttachThoughtSignature := attachThoughtSignature && (message.Role == "assistant" || message.Role == "model")
		signatureAttached := false
		// isToolCall := false
		if message.ToolCalls != nil {
			// message.Role = "model"
			// isToolCall = true
			for _, call := range message.ParseToolCalls() {
				args := map[string]interface{}{}
				if call.Function.Arguments != "" {
					if json.Unmarshal([]byte(call.Function.Arguments), &args) != nil {
						return nil, fmt.Errorf("invalid arguments for function %s, args: %s", call.Function.Name, call.Function.Arguments)
					}
				}
				toolCall := dto.GeminiPart{
					FunctionCall: &dto.FunctionCall{
						FunctionName: call.Function.Name,
						Arguments:    args,
					},
				}
				if shouldAttachThoughtSignature && !signatureAttached && hasFunctionCallContent(toolCall.FunctionCall) && len(toolCall.ThoughtSignature) == 0 {
					toolCall.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					signatureAttached = true
				}
				parts = append(parts, toolCall)
				// Remember the call ID so a later tool message can resolve its name.
				tool_call_ids[call.ID] = call.Function.Name
			}
		}
		openaiContent := message.ParseContent()
		for _, part := range openaiContent {
			if part.Type == dto.ContentTypeText {
				if part.Text == "" {
					continue
				}
				// check markdown image ![image](data:image/jpeg;base64,xxxxxxxxxxxx)
				// Plain string scanning is used instead of a regexp to avoid
				// performance problems on very large texts.
				text := part.Text
				hasMarkdownImage := false
				for {
					// Fast check for a markdown image marker.
					startIdx := strings.Index(text, "![")
					if startIdx == -1 {
						break
					}
					// Find "](data:".
					bracketIdx := strings.Index(text[startIdx:], "](data:")
					if bracketIdx == -1 {
						break
					}
					bracketIdx += startIdx
					// Find the closing ")".
					closeIdx := strings.Index(text[bracketIdx+2:], ")")
					if closeIdx == -1 {
						break
					}
					closeIdx += bracketIdx + 2
					hasMarkdownImage = true
					// Emit the text preceding the image as its own part.
					if startIdx > 0 {
						textBefore := text[:startIdx]
						if textBefore != "" {
							parts = append(parts, dto.GeminiPart{
								Text: textBefore,
							})
						}
					}
					// Extract the data URL (from just after "](" up to ")").
					dataUrl := text[bracketIdx+2 : closeIdx]
					format, base64String, err := service.DecodeBase64FileData(dataUrl)
					if err != nil {
						return nil, fmt.Errorf("decode markdown base64 image data failed: %s", err.Error())
					}
					imgPart := dto.GeminiPart{
						InlineData: &dto.GeminiInlineData{
							MimeType: format,
							Data:     base64String,
						},
					}
					if shouldAttachThoughtSignature {
						imgPart.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					}
					parts = append(parts, imgPart)
					// Continue scanning the remaining text.
					text = text[closeIdx+1:]
				}
				// Append the original text unchanged when no markdown image was found.
				if !hasMarkdownImage {
					parts = append(parts, dto.GeminiPart{
						Text: part.Text,
					})
				}
			} else if part.Type == dto.ContentTypeImageURL {
				// Fetch image data through the unified file service.
				var source *types.FileSource
				imageUrl := part.GetImageMedia().Url
				if strings.HasPrefix(imageUrl, "http") {
					source = types.NewURLFileSource(imageUrl)
				} else {
					source = types.NewBase64FileSource(imageUrl, "")
				}
				base64Data, mimeType, err := service.GetBase64Data(c, source, "formatting image for Gemini")
				if err != nil {
					return nil, fmt.Errorf("get file data from '%s' failed: %w", source.GetIdentifier(), err)
				}
				// Validate the MIME type against the Gemini whitelist.
				if _, ok := geminiSupportedMimeTypes[strings.ToLower(mimeType)]; !ok {
					return nil, fmt.Errorf("mime type is not supported by Gemini: '%s', url: '%s', supported types are: %v", mimeType, source.GetIdentifier(), getSupportedMimeTypesList())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: mimeType,
						Data:     base64Data,
					},
				})
			} else if part.Type == dto.ContentTypeFile {
				if part.GetFile().FileId != "" {
					return nil, fmt.Errorf("only base64 file is supported in gemini")
				}
				fileSource := types.NewBase64FileSource(part.GetFile().FileData, "")
				base64Data, mimeType, err := service.GetBase64Data(c, fileSource, "formatting file for Gemini")
				if err != nil {
					return nil, fmt.Errorf("decode base64 file data failed: %s", err.Error())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: mimeType,
						Data:     base64Data,
					},
				})
			} else if part.Type == dto.ContentTypeInputAudio {
				if part.GetInputAudio().Data == "" {
					return nil, fmt.Errorf("only base64 audio is supported in gemini")
				}
				audioSource := types.NewBase64FileSource(part.GetInputAudio().Data, "audio/"+part.GetInputAudio().Format)
				base64Data, mimeType, err := service.GetBase64Data(c, audioSource, "formatting audio for Gemini")
				if err != nil {
					return nil, fmt.Errorf("decode base64 audio data failed: %s", err.Error())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: mimeType,
						Data:     base64Data,
					},
				})
			}
		}
		// If a signature is required but none was attached yet (no tool_calls,
		// or the tool_calls list was empty), attach it to the first text part.
		if shouldAttachThoughtSignature && !signatureAttached && len(parts) > 0 {
			for i := range parts {
				if parts[i].Text != "" {
					parts[i].ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					break
				}
			}
		}
		content.Parts = parts
		// there's no assistant role in gemini and API shall vomit if Role is not user or model
		if content.Role == "assistant" {
			content.Role = "model"
		}
		if len(content.Parts) > 0 {
			geminiRequest.Contents = append(geminiRequest.Contents, content)
		}
	}
	if len(system_content) > 0 {
		geminiRequest.SystemInstructions = &dto.GeminiChatContent{
			Parts: []dto.GeminiPart{
				{
					Text: strings.Join(system_content, "\n"),
				},
			},
		}
	}
	return &geminiRequest, nil
}
  580. // parseStopSequences 解析停止序列,支持字符串或字符串数组
  581. func parseStopSequences(stop any) []string {
  582. if stop == nil {
  583. return nil
  584. }
  585. switch v := stop.(type) {
  586. case string:
  587. if v != "" {
  588. return []string{v}
  589. }
  590. case []string:
  591. return v
  592. case []interface{}:
  593. sequences := make([]string, 0, len(v))
  594. for _, item := range v {
  595. if str, ok := item.(string); ok && str != "" {
  596. sequences = append(sequences, str)
  597. }
  598. }
  599. return sequences
  600. }
  601. return nil
  602. }
  603. func hasFunctionCallContent(call *dto.FunctionCall) bool {
  604. if call == nil {
  605. return false
  606. }
  607. if strings.TrimSpace(call.FunctionName) != "" {
  608. return true
  609. }
  610. switch v := call.Arguments.(type) {
  611. case nil:
  612. return false
  613. case string:
  614. return strings.TrimSpace(v) != ""
  615. case map[string]interface{}:
  616. return len(v) > 0
  617. case []interface{}:
  618. return len(v) > 0
  619. default:
  620. return true
  621. }
  622. }
  623. // Helper function to get a list of supported MIME types for error messages
  624. func getSupportedMimeTypesList() []string {
  625. keys := make([]string, 0, len(geminiSupportedMimeTypes))
  626. for k := range geminiSupportedMimeTypes {
  627. keys = append(keys, k)
  628. }
  629. return keys
  630. }
// geminiOpenAPISchemaAllowedFields is the subset of OpenAPI schema keys kept
// when cleaning function parameter schemas for Gemini (see
// cleanFunctionParametersWithDepth); all other keys are stripped.
// NOTE(review): stated to match the official SDK Schema type — confirm
// against the current Gemini SDK when updating.
var geminiOpenAPISchemaAllowedFields = map[string]struct{}{
	"anyOf":            {},
	"default":          {},
	"description":      {},
	"enum":             {},
	"example":          {},
	"format":           {},
	"items":            {},
	"maxItems":         {},
	"maxLength":        {},
	"maxProperties":    {},
	"maximum":          {},
	"minItems":         {},
	"minLength":        {},
	"minProperties":    {},
	"minimum":          {},
	"nullable":         {},
	"pattern":          {},
	"properties":       {},
	"propertyOrdering": {},
	"required":         {},
	"title":            {},
	"type":             {},
}
  655. const geminiFunctionSchemaMaxDepth = 64
// cleanFunctionParameters recursively removes unsupported fields from Gemini function parameters.
// It is a convenience wrapper around cleanFunctionParametersWithDepth starting at depth 0.
func cleanFunctionParameters(params interface{}) interface{} {
	return cleanFunctionParametersWithDepth(params, 0)
}
  660. func cleanFunctionParametersWithDepth(params interface{}, depth int) interface{} {
  661. if params == nil {
  662. return nil
  663. }
  664. if depth >= geminiFunctionSchemaMaxDepth {
  665. return cleanFunctionParametersShallow(params)
  666. }
  667. switch v := params.(type) {
  668. case map[string]interface{}:
  669. // Keep only Gemini-supported OpenAPI schema subset fields (per official SDK Schema).
  670. cleanedMap := make(map[string]interface{}, len(v))
  671. for k, val := range v {
  672. if _, ok := geminiOpenAPISchemaAllowedFields[k]; ok {
  673. cleanedMap[k] = val
  674. }
  675. }
  676. normalizeGeminiSchemaTypeAndNullable(cleanedMap)
  677. // Clean properties
  678. if props, ok := cleanedMap["properties"].(map[string]interface{}); ok && props != nil {
  679. cleanedProps := make(map[string]interface{})
  680. for propName, propValue := range props {
  681. cleanedProps[propName] = cleanFunctionParametersWithDepth(propValue, depth+1)
  682. }
  683. cleanedMap["properties"] = cleanedProps
  684. }
  685. // Recursively clean items in arrays
  686. if items, ok := cleanedMap["items"].(map[string]interface{}); ok && items != nil {
  687. cleanedMap["items"] = cleanFunctionParametersWithDepth(items, depth+1)
  688. }
  689. // OpenAPI tuple-style items is not supported by Gemini SDK Schema; keep first to avoid API rejection.
  690. if itemsArray, ok := cleanedMap["items"].([]interface{}); ok && len(itemsArray) > 0 {
  691. cleanedMap["items"] = cleanFunctionParametersWithDepth(itemsArray[0], depth+1)
  692. }
  693. // Recursively clean anyOf
  694. if nested, ok := cleanedMap["anyOf"].([]interface{}); ok && nested != nil {
  695. cleanedNested := make([]interface{}, len(nested))
  696. for i, item := range nested {
  697. cleanedNested[i] = cleanFunctionParametersWithDepth(item, depth+1)
  698. }
  699. cleanedMap["anyOf"] = cleanedNested
  700. }
  701. return cleanedMap
  702. case []interface{}:
  703. // Handle arrays of schemas
  704. cleanedArray := make([]interface{}, len(v))
  705. for i, item := range v {
  706. cleanedArray[i] = cleanFunctionParametersWithDepth(item, depth+1)
  707. }
  708. return cleanedArray
  709. default:
  710. // Not a map or array, return as is (e.g., could be a primitive)
  711. return params
  712. }
  713. }
  714. func cleanFunctionParametersShallow(params interface{}) interface{} {
  715. switch v := params.(type) {
  716. case map[string]interface{}:
  717. cleanedMap := make(map[string]interface{}, len(v))
  718. for k, val := range v {
  719. if _, ok := geminiOpenAPISchemaAllowedFields[k]; ok {
  720. cleanedMap[k] = val
  721. }
  722. }
  723. normalizeGeminiSchemaTypeAndNullable(cleanedMap)
  724. // Stop recursion and avoid retaining huge nested structures.
  725. delete(cleanedMap, "properties")
  726. delete(cleanedMap, "items")
  727. delete(cleanedMap, "anyOf")
  728. return cleanedMap
  729. case []interface{}:
  730. // Prefer an empty list over deep recursion on attacker-controlled inputs.
  731. return []interface{}{}
  732. default:
  733. return params
  734. }
  735. }
  736. func normalizeGeminiSchemaTypeAndNullable(schema map[string]interface{}) {
  737. rawType, ok := schema["type"]
  738. if !ok || rawType == nil {
  739. return
  740. }
  741. normalize := func(t string) (string, bool) {
  742. switch strings.ToLower(strings.TrimSpace(t)) {
  743. case "object":
  744. return "OBJECT", false
  745. case "array":
  746. return "ARRAY", false
  747. case "string":
  748. return "STRING", false
  749. case "integer":
  750. return "INTEGER", false
  751. case "number":
  752. return "NUMBER", false
  753. case "boolean":
  754. return "BOOLEAN", false
  755. case "null":
  756. return "", true
  757. default:
  758. return t, false
  759. }
  760. }
  761. switch t := rawType.(type) {
  762. case string:
  763. normalized, isNull := normalize(t)
  764. if isNull {
  765. schema["nullable"] = true
  766. delete(schema, "type")
  767. return
  768. }
  769. schema["type"] = normalized
  770. case []interface{}:
  771. nullable := false
  772. var chosen string
  773. for _, item := range t {
  774. if s, ok := item.(string); ok {
  775. normalized, isNull := normalize(s)
  776. if isNull {
  777. nullable = true
  778. continue
  779. }
  780. if chosen == "" {
  781. chosen = normalized
  782. }
  783. }
  784. }
  785. if nullable {
  786. schema["nullable"] = true
  787. }
  788. if chosen != "" {
  789. schema["type"] = chosen
  790. } else {
  791. delete(schema, "type")
  792. }
  793. }
  794. }
  795. func removeAdditionalPropertiesWithDepth(schema interface{}, depth int) interface{} {
  796. if depth >= 5 {
  797. return schema
  798. }
  799. v, ok := schema.(map[string]interface{})
  800. if !ok || len(v) == 0 {
  801. return schema
  802. }
  803. // 删除所有的title字段
  804. delete(v, "title")
  805. delete(v, "$schema")
  806. // 如果type不为object和array,则直接返回
  807. if typeVal, exists := v["type"]; !exists || (typeVal != "object" && typeVal != "array") {
  808. return schema
  809. }
  810. switch v["type"] {
  811. case "object":
  812. delete(v, "additionalProperties")
  813. // 处理 properties
  814. if properties, ok := v["properties"].(map[string]interface{}); ok {
  815. for key, value := range properties {
  816. properties[key] = removeAdditionalPropertiesWithDepth(value, depth+1)
  817. }
  818. }
  819. for _, field := range []string{"allOf", "anyOf", "oneOf"} {
  820. if nested, ok := v[field].([]interface{}); ok {
  821. for i, item := range nested {
  822. nested[i] = removeAdditionalPropertiesWithDepth(item, depth+1)
  823. }
  824. }
  825. }
  826. case "array":
  827. if items, ok := v["items"].(map[string]interface{}); ok {
  828. v["items"] = removeAdditionalPropertiesWithDepth(items, depth+1)
  829. }
  830. }
  831. return v
  832. }
  833. func unescapeString(s string) (string, error) {
  834. var result []rune
  835. escaped := false
  836. i := 0
  837. for i < len(s) {
  838. r, size := utf8.DecodeRuneInString(s[i:]) // 正确解码UTF-8字符
  839. if r == utf8.RuneError {
  840. return "", fmt.Errorf("invalid UTF-8 encoding")
  841. }
  842. if escaped {
  843. // 如果是转义符后的字符,检查其类型
  844. switch r {
  845. case '"':
  846. result = append(result, '"')
  847. case '\\':
  848. result = append(result, '\\')
  849. case '/':
  850. result = append(result, '/')
  851. case 'b':
  852. result = append(result, '\b')
  853. case 'f':
  854. result = append(result, '\f')
  855. case 'n':
  856. result = append(result, '\n')
  857. case 'r':
  858. result = append(result, '\r')
  859. case 't':
  860. result = append(result, '\t')
  861. case '\'':
  862. result = append(result, '\'')
  863. default:
  864. // 如果遇到一个非法的转义字符,直接按原样输出
  865. result = append(result, '\\', r)
  866. }
  867. escaped = false
  868. } else {
  869. if r == '\\' {
  870. escaped = true // 记录反斜杠作为转义符
  871. } else {
  872. result = append(result, r)
  873. }
  874. }
  875. i += size // 移动到下一个字符
  876. }
  877. return string(result), nil
  878. }
  879. func unescapeMapOrSlice(data interface{}) interface{} {
  880. switch v := data.(type) {
  881. case map[string]interface{}:
  882. for k, val := range v {
  883. v[k] = unescapeMapOrSlice(val)
  884. }
  885. case []interface{}:
  886. for i, val := range v {
  887. v[i] = unescapeMapOrSlice(val)
  888. }
  889. case string:
  890. if unescaped, err := unescapeString(v); err != nil {
  891. return v
  892. } else {
  893. return unescaped
  894. }
  895. }
  896. return data
  897. }
  898. func getResponseToolCall(item *dto.GeminiPart) *dto.ToolCallResponse {
  899. var argsBytes []byte
  900. var err error
  901. // 移除 unescapeMapOrSlice 调用,直接使用 json.Marshal
  902. // JSON 序列化/反序列化已经正确处理了转义字符
  903. argsBytes, err = json.Marshal(item.FunctionCall.Arguments)
  904. if err != nil {
  905. return nil
  906. }
  907. return &dto.ToolCallResponse{
  908. ID: fmt.Sprintf("call_%s", common.GetUUID()),
  909. Type: "function",
  910. Function: dto.FunctionResponse{
  911. Arguments: string(argsBytes),
  912. Name: item.FunctionCall.FunctionName,
  913. },
  914. }
  915. }
  916. func responseGeminiChat2OpenAI(c *gin.Context, response *dto.GeminiChatResponse) *dto.OpenAITextResponse {
  917. fullTextResponse := dto.OpenAITextResponse{
  918. Id: helper.GetResponseID(c),
  919. Object: "chat.completion",
  920. Created: common.GetTimestamp(),
  921. Choices: make([]dto.OpenAITextResponseChoice, 0, len(response.Candidates)),
  922. }
  923. isToolCall := false
  924. for _, candidate := range response.Candidates {
  925. choice := dto.OpenAITextResponseChoice{
  926. Index: int(candidate.Index),
  927. Message: dto.Message{
  928. Role: "assistant",
  929. Content: "",
  930. },
  931. FinishReason: constant.FinishReasonStop,
  932. }
  933. if len(candidate.Content.Parts) > 0 {
  934. var texts []string
  935. var toolCalls []dto.ToolCallResponse
  936. for _, part := range candidate.Content.Parts {
  937. if part.InlineData != nil {
  938. // 媒体内容
  939. if strings.HasPrefix(part.InlineData.MimeType, "image") {
  940. imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
  941. texts = append(texts, imgText)
  942. } else {
  943. // 其他媒体类型,直接显示链接
  944. texts = append(texts, fmt.Sprintf("[media](data:%s;base64,%s)", part.InlineData.MimeType, part.InlineData.Data))
  945. }
  946. } else if part.FunctionCall != nil {
  947. choice.FinishReason = constant.FinishReasonToolCalls
  948. if call := getResponseToolCall(&part); call != nil {
  949. toolCalls = append(toolCalls, *call)
  950. }
  951. } else if part.Thought {
  952. choice.Message.ReasoningContent = part.Text
  953. } else {
  954. if part.ExecutableCode != nil {
  955. texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```")
  956. } else if part.CodeExecutionResult != nil {
  957. texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```")
  958. } else {
  959. // 过滤掉空行
  960. if part.Text != "\n" {
  961. texts = append(texts, part.Text)
  962. }
  963. }
  964. }
  965. }
  966. if len(toolCalls) > 0 {
  967. choice.Message.SetToolCalls(toolCalls)
  968. isToolCall = true
  969. }
  970. choice.Message.SetStringContent(strings.Join(texts, "\n"))
  971. }
  972. if candidate.FinishReason != nil {
  973. switch *candidate.FinishReason {
  974. case "STOP":
  975. choice.FinishReason = constant.FinishReasonStop
  976. case "MAX_TOKENS":
  977. choice.FinishReason = constant.FinishReasonLength
  978. case "SAFETY":
  979. // Safety filter triggered
  980. choice.FinishReason = constant.FinishReasonContentFilter
  981. case "RECITATION":
  982. // Recitation (citation) detected
  983. choice.FinishReason = constant.FinishReasonContentFilter
  984. case "BLOCKLIST":
  985. // Blocklist triggered
  986. choice.FinishReason = constant.FinishReasonContentFilter
  987. case "PROHIBITED_CONTENT":
  988. // Prohibited content detected
  989. choice.FinishReason = constant.FinishReasonContentFilter
  990. case "SPII":
  991. // Sensitive personally identifiable information
  992. choice.FinishReason = constant.FinishReasonContentFilter
  993. case "OTHER":
  994. // Other reasons
  995. choice.FinishReason = constant.FinishReasonContentFilter
  996. default:
  997. choice.FinishReason = constant.FinishReasonContentFilter
  998. }
  999. }
  1000. if isToolCall {
  1001. choice.FinishReason = constant.FinishReasonToolCalls
  1002. }
  1003. fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
  1004. }
  1005. return &fullTextResponse
  1006. }
// streamResponseGeminiChat2OpenAI converts one streamed Gemini chunk into an
// OpenAI chat.completion.chunk. The boolean result reports whether any
// candidate finished with "STOP" so the caller can emit a dedicated stop
// chunk itself.
func streamResponseGeminiChat2OpenAI(geminiResponse *dto.GeminiChatResponse) (*dto.ChatCompletionsStreamResponse, bool) {
	choices := make([]dto.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates))
	isStop := false
	for _, candidate := range geminiResponse.Candidates {
		// A STOP finish is cleared here and surfaced through isStop instead;
		// as a consequence the "STOP" case in the switch below never matches.
		if candidate.FinishReason != nil && *candidate.FinishReason == "STOP" {
			isStop = true
			candidate.FinishReason = nil
		}
		choice := dto.ChatCompletionsStreamResponseChoice{
			Index: int(candidate.Index),
			Delta: dto.ChatCompletionsStreamResponseChoiceDelta{
				//Role: "assistant",
			},
		}
		var texts []string
		isTools := false
		isThought := false
		if candidate.FinishReason != nil {
			// Map Gemini FinishReason to OpenAI finish_reason
			switch *candidate.FinishReason {
			case "STOP":
				// Normal completion (unreachable: STOP was cleared above)
				choice.FinishReason = &constant.FinishReasonStop
			case "MAX_TOKENS":
				// Reached maximum token limit
				choice.FinishReason = &constant.FinishReasonLength
			case "SAFETY":
				// Safety filter triggered
				choice.FinishReason = &constant.FinishReasonContentFilter
			case "RECITATION":
				// Recitation (citation) detected
				choice.FinishReason = &constant.FinishReasonContentFilter
			case "BLOCKLIST":
				// Blocklist triggered
				choice.FinishReason = &constant.FinishReasonContentFilter
			case "PROHIBITED_CONTENT":
				// Prohibited content detected
				choice.FinishReason = &constant.FinishReasonContentFilter
			case "SPII":
				// Sensitive personally identifiable information
				choice.FinishReason = &constant.FinishReasonContentFilter
			case "OTHER":
				// Other reasons
				choice.FinishReason = &constant.FinishReasonContentFilter
			default:
				// Unknown reason, treat as content filter
				choice.FinishReason = &constant.FinishReasonContentFilter
			}
		}
		for _, part := range candidate.Content.Parts {
			if part.InlineData != nil {
				// Only image media is surfaced in stream mode; other inline
				// MIME types are dropped here (unlike the non-stream path).
				if strings.HasPrefix(part.InlineData.MimeType, "image") {
					imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
					texts = append(texts, imgText)
				}
			} else if part.FunctionCall != nil {
				isTools = true
				if call := getResponseToolCall(&part); call != nil {
					call.SetIndex(len(choice.Delta.ToolCalls))
					choice.Delta.ToolCalls = append(choice.Delta.ToolCalls, *call)
				}
			} else if part.Thought {
				isThought = true
				texts = append(texts, part.Text)
			} else {
				if part.ExecutableCode != nil {
					texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```\n")
				} else if part.CodeExecutionResult != nil {
					texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```\n")
				} else {
					// Skip bare newline-only parts.
					if part.Text != "\n" {
						texts = append(texts, part.Text)
					}
				}
			}
		}
		// NOTE(review): a chunk mixing thought and regular text parts is sent
		// entirely as reasoning content — confirm Gemini never mixes them in
		// one chunk.
		if isThought {
			choice.Delta.SetReasoningContent(strings.Join(texts, "\n"))
		} else {
			choice.Delta.SetContentString(strings.Join(texts, "\n"))
		}
		if isTools {
			choice.FinishReason = &constant.FinishReasonToolCalls
		}
		choices = append(choices, choice)
	}
	var response dto.ChatCompletionsStreamResponse
	response.Object = "chat.completion.chunk"
	response.Choices = choices
	return &response, isStop
}
  1098. func handleStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
  1099. streamData, err := common.Marshal(resp)
  1100. if err != nil {
  1101. return fmt.Errorf("failed to marshal stream response: %w", err)
  1102. }
  1103. err = openai.HandleStreamFormat(c, info, string(streamData), info.ChannelSetting.ForceFormat, info.ChannelSetting.ThinkingToContent)
  1104. if err != nil {
  1105. return fmt.Errorf("failed to handle stream format: %w", err)
  1106. }
  1107. return nil
  1108. }
  1109. func handleFinalStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
  1110. streamData, err := common.Marshal(resp)
  1111. if err != nil {
  1112. return fmt.Errorf("failed to marshal stream response: %w", err)
  1113. }
  1114. openai.HandleFinalResponse(c, info, string(streamData), resp.Id, resp.Created, resp.Model, resp.GetSystemFingerprint(), resp.Usage, false)
  1115. return nil
  1116. }
// geminiStreamHandler drives the SSE scanner over a Gemini streaming response,
// accumulating usage statistics and delegating each decoded chunk to callback.
// The callback returning false stops the stream early.
func geminiStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response, callback func(data string, geminiResponse *dto.GeminiChatResponse) bool) (*dto.Usage, *types.NewAPIError) {
	var usage = &dto.Usage{}
	var imageCount int
	responseText := strings.Builder{}
	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
		var geminiResponse dto.GeminiChatResponse
		err := common.UnmarshalJsonStr(data, &geminiResponse)
		if err != nil {
			logger.LogError(c, "error unmarshalling stream response: "+err.Error())
			return false
		}
		// Record the block reason for admin diagnostics when the prompt was rejected.
		if len(geminiResponse.Candidates) == 0 && geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, fmt.Sprintf("gemini_block_reason=%s", *geminiResponse.PromptFeedback.BlockReason))
		}
		// Count generated images and collect text for the token-estimation fallback.
		for _, candidate := range geminiResponse.Candidates {
			for _, part := range candidate.Content.Parts {
				if part.InlineData != nil && part.InlineData.MimeType != "" {
					imageCount++
				}
				if part.Text != "" {
					responseText.WriteString(part.Text)
				}
			}
		}
		// Update usage statistics from the latest chunk that reports them.
		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount
			usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
			usage.PromptTokensDetails.CachedTokens = geminiResponse.UsageMetadata.CachedContentTokenCount
			for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
				if detail.Modality == "AUDIO" {
					usage.PromptTokensDetails.AudioTokens = detail.TokenCount
				} else if detail.Modality == "TEXT" {
					usage.PromptTokensDetails.TextTokens = detail.TokenCount
				}
			}
		}
		return callback(data, &geminiResponse)
	})
	// Fallback: bill images at a flat 1400 completion tokens each when the
	// upstream reported no completion tokens at all.
	if imageCount != 0 {
		if usage.CompletionTokens == 0 {
			usage.CompletionTokens = imageCount * 1400
		}
	}
	// NOTE(review): this overwrites the per-modality TEXT detail gathered in
	// the loop above with the whole prompt count — confirm this coarse
	// override is intended.
	usage.PromptTokensDetails.TextTokens = usage.PromptTokens
	// Derive completion tokens from the reported total; this supersedes the
	// image estimate above whenever a total was reported.
	if usage.TotalTokens > 0 {
		usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	}
	// Last resort: estimate usage from collected text, but only when at least
	// one upstream chunk was actually received.
	if usage.CompletionTokens <= 0 {
		if info.ReceivedResponseCount > 0 {
			usage = service.ResponseText2Usage(c, responseText.String(), info.UpstreamModelName, info.GetEstimatePromptTokens())
		} else {
			usage = &dto.Usage{}
		}
	}
	return usage, nil
}
// GeminiChatStreamHandler relays a Gemini streaming chat response as OpenAI
// chat.completion.chunk SSE events: it keeps tool-call indexes stable across
// chunks, emits the initial empty chunk and a stop chunk when the stream
// finishes, and closes with a final usage chunk.
func GeminiChatStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	id := helper.GetResponseID(c)
	createAt := common.GetTimestamp()
	finishReason := constant.FinishReasonStop
	// Per-choice bookkeeping so a tool call ID seen in several chunks keeps
	// the same OpenAI tool_calls index.
	toolCallIndexByChoice := make(map[int]map[string]int)
	nextToolCallIndexByChoice := make(map[int]int)
	usage, err := geminiStreamHandler(c, info, resp, func(data string, geminiResponse *dto.GeminiChatResponse) bool {
		response, isStop := streamResponseGeminiChat2OpenAI(geminiResponse)
		response.Id = id
		response.Created = createAt
		response.Model = info.UpstreamModelName
		// Assign stable, per-choice tool call indexes keyed by tool call ID;
		// calls without an ID keep whatever index the converter gave them.
		for choiceIdx := range response.Choices {
			choiceKey := response.Choices[choiceIdx].Index
			for toolIdx := range response.Choices[choiceIdx].Delta.ToolCalls {
				tool := &response.Choices[choiceIdx].Delta.ToolCalls[toolIdx]
				if tool.ID == "" {
					continue
				}
				m := toolCallIndexByChoice[choiceKey]
				if m == nil {
					m = make(map[string]int)
					toolCallIndexByChoice[choiceKey] = m
				}
				if idx, ok := m[tool.ID]; ok {
					tool.SetIndex(idx)
					continue
				}
				idx := nextToolCallIndexByChoice[choiceKey]
				nextToolCallIndexByChoice[choiceKey] = idx + 1
				m[tool.ID] = idx
				tool.SetIndex(idx)
			}
		}
		logger.LogDebug(c, fmt.Sprintf("info.SendResponseCount = %d", info.SendResponseCount))
		if info.SendResponseCount == 0 {
			// send first response
			emptyResponse := helper.GenerateStartEmptyResponse(id, createAt, info.UpstreamModelName, nil)
			if response.IsToolCall() {
				// Announce the tool calls (names only, arguments emptied) in
				// the opening chunk, then strip them from the data chunk so
				// the arguments stream separately.
				if len(emptyResponse.Choices) > 0 && len(response.Choices) > 0 {
					toolCalls := response.Choices[0].Delta.ToolCalls
					copiedToolCalls := make([]dto.ToolCallResponse, len(toolCalls))
					for idx := range toolCalls {
						copiedToolCalls[idx] = toolCalls[idx]
						copiedToolCalls[idx].Function.Arguments = ""
					}
					emptyResponse.Choices[0].Delta.ToolCalls = copiedToolCalls
				}
				finishReason = constant.FinishReasonToolCalls
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
				response.ClearToolCalls()
				if response.IsFinished() {
					// The dedicated stop chunk below carries the finish reason.
					response.Choices[0].FinishReason = nil
				}
			} else {
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
			}
		}
		err := handleStream(c, info, response)
		if err != nil {
			logger.LogError(c, err.Error())
		}
		if isStop {
			// Best-effort: the stop chunk closes the choice.
			_ = handleStream(c, info, helper.GenerateStopResponse(id, createAt, info.UpstreamModelName, finishReason))
		}
		return true
	})
	if err != nil {
		return usage, err
	}
	// Final chunk carries the accumulated usage.
	response := helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
	handleErr := handleFinalStream(c, info, response)
	if handleErr != nil {
		common.SysLog("send final response failed: " + handleErr.Error())
	}
	return usage, nil
}
// GeminiChatHandler handles a non-streaming Gemini chat response: it converts
// the payload into the requested relay format (OpenAI / Claude / passthrough
// Gemini), computes usage, and writes the result to the client. An empty
// candidate list produces a client-facing error response while still
// returning prompt-token usage for billing.
func GeminiChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	service.CloseResponseBodyGracefully(resp)
	if common.DebugEnabled {
		println(string(responseBody))
	}
	var geminiResponse dto.GeminiChatResponse
	err = common.Unmarshal(responseBody, &geminiResponse)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	// No candidates: either the prompt was blocked or the upstream returned an
	// empty response. Report usage plus a format-appropriate error body.
	if len(geminiResponse.Candidates) == 0 {
		usage := dto.Usage{
			PromptTokens: geminiResponse.UsageMetadata.PromptTokenCount,
		}
		usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
		usage.PromptTokensDetails.CachedTokens = geminiResponse.UsageMetadata.CachedContentTokenCount
		for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
			if detail.Modality == "AUDIO" {
				usage.PromptTokensDetails.AudioTokens = detail.TokenCount
			} else if detail.Modality == "TEXT" {
				usage.PromptTokensDetails.TextTokens = detail.TokenCount
			}
		}
		// Fall back to the local estimate when upstream reported nothing.
		if usage.PromptTokens <= 0 {
			usage.PromptTokens = info.GetEstimatePromptTokens()
		}
		var newAPIError *types.NewAPIError
		if geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, fmt.Sprintf("gemini_block_reason=%s", *geminiResponse.PromptFeedback.BlockReason))
			newAPIError = types.NewOpenAIError(
				errors.New("request blocked by Gemini API: "+*geminiResponse.PromptFeedback.BlockReason),
				types.ErrorCodePromptBlocked,
				http.StatusBadRequest,
			)
		} else {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, "gemini_empty_candidates")
			newAPIError = types.NewOpenAIError(
				errors.New("empty response from Gemini API"),
				types.ErrorCodeEmptyResponse,
				http.StatusInternalServerError,
			)
		}
		service.ResetStatusCode(newAPIError, c.GetString("status_code_mapping"))
		// Shape the error body to match the client's requested relay format.
		switch info.RelayFormat {
		case types.RelayFormatClaude:
			c.JSON(newAPIError.StatusCode, gin.H{
				"type":  "error",
				"error": newAPIError.ToClaudeError(),
			})
		default:
			c.JSON(newAPIError.StatusCode, gin.H{
				"error": newAPIError.ToOpenAIError(),
			})
		}
		return &usage, nil
	}
	fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
	fullTextResponse.Model = info.UpstreamModelName
	usage := dto.Usage{
		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,
		TotalTokens:      geminiResponse.UsageMetadata.TotalTokenCount,
	}
	usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
	usage.PromptTokensDetails.CachedTokens = geminiResponse.UsageMetadata.CachedContentTokenCount
	// NOTE(review): this overrides CandidatesTokenCount with total-prompt,
	// which folds thought tokens into completion tokens — confirm intended.
	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
		if detail.Modality == "AUDIO" {
			usage.PromptTokensDetails.AudioTokens = detail.TokenCount
		} else if detail.Modality == "TEXT" {
			usage.PromptTokensDetails.TextTokens = detail.TokenCount
		}
	}
	fullTextResponse.Usage = usage
	// Re-serialize for the requested format; Gemini passthrough keeps the
	// upstream body untouched.
	switch info.RelayFormat {
	case types.RelayFormatOpenAI:
		responseBody, err = common.Marshal(fullTextResponse)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
	case types.RelayFormatClaude:
		claudeResp := service.ResponseOpenAI2Claude(fullTextResponse, info)
		claudeRespStr, err := common.Marshal(claudeResp)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
		responseBody = claudeRespStr
	case types.RelayFormatGemini:
		break
	}
	service.IOCopyBytesGracefully(c, resp, responseBody)
	return &usage, nil
}
  1356. func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  1357. defer service.CloseResponseBodyGracefully(resp)
  1358. responseBody, readErr := io.ReadAll(resp.Body)
  1359. if readErr != nil {
  1360. return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1361. }
  1362. var geminiResponse dto.GeminiBatchEmbeddingResponse
  1363. if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  1364. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1365. }
  1366. // convert to openai format response
  1367. openAIResponse := dto.OpenAIEmbeddingResponse{
  1368. Object: "list",
  1369. Data: make([]dto.OpenAIEmbeddingResponseItem, 0, len(geminiResponse.Embeddings)),
  1370. Model: info.UpstreamModelName,
  1371. }
  1372. for i, embedding := range geminiResponse.Embeddings {
  1373. openAIResponse.Data = append(openAIResponse.Data, dto.OpenAIEmbeddingResponseItem{
  1374. Object: "embedding",
  1375. Embedding: embedding.Values,
  1376. Index: i,
  1377. })
  1378. }
  1379. // calculate usage
  1380. // https://ai.google.dev/gemini-api/docs/pricing?hl=zh-cn#text-embedding-004
  1381. // Google has not yet clarified how embedding models will be billed
  1382. // refer to openai billing method to use input tokens billing
  1383. // https://platform.openai.com/docs/guides/embeddings#what-are-embeddings
  1384. usage := service.ResponseText2Usage(c, "", info.UpstreamModelName, info.GetEstimatePromptTokens())
  1385. openAIResponse.Usage = *usage
  1386. jsonResponse, jsonErr := common.Marshal(openAIResponse)
  1387. if jsonErr != nil {
  1388. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1389. }
  1390. service.IOCopyBytesGracefully(c, resp, jsonResponse)
  1391. return usage, nil
  1392. }
  1393. func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
  1394. responseBody, readErr := io.ReadAll(resp.Body)
  1395. if readErr != nil {
  1396. return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1397. }
  1398. _ = resp.Body.Close()
  1399. var geminiResponse dto.GeminiImageResponse
  1400. if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
  1401. return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1402. }
  1403. if len(geminiResponse.Predictions) == 0 {
  1404. return nil, types.NewOpenAIError(errors.New("no images generated"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
  1405. }
  1406. // convert to openai format response
  1407. openAIResponse := dto.ImageResponse{
  1408. Created: common.GetTimestamp(),
  1409. Data: make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
  1410. }
  1411. for _, prediction := range geminiResponse.Predictions {
  1412. if prediction.RaiFilteredReason != "" {
  1413. continue // skip filtered image
  1414. }
  1415. openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
  1416. B64Json: prediction.BytesBase64Encoded,
  1417. })
  1418. }
  1419. jsonResponse, jsonErr := json.Marshal(openAIResponse)
  1420. if jsonErr != nil {
  1421. return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
  1422. }
  1423. c.Writer.Header().Set("Content-Type", "application/json")
  1424. c.Writer.WriteHeader(resp.StatusCode)
  1425. _, _ = c.Writer.Write(jsonResponse)
  1426. // https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
  1427. // each image has fixed 258 tokens
  1428. const imageTokens = 258
  1429. generatedImages := len(openAIResponse.Data)
  1430. usage := &dto.Usage{
  1431. PromptTokens: imageTokens * generatedImages, // each generated image has fixed 258 tokens
  1432. CompletionTokens: 0, // image generation does not calculate completion tokens
  1433. TotalTokens: imageTokens * generatedImages,
  1434. }
  1435. return usage, nil
  1436. }
// GeminiModelsResponse is the payload returned by the Gemini /v1beta/models
// listing endpoint.
type GeminiModelsResponse struct {
	Models        []dto.GeminiModel `json:"models"`        // one page of models
	NextPageToken string            `json:"nextPageToken"` // empty on the last page
}
  1441. func FetchGeminiModels(baseURL, apiKey, proxyURL string) ([]string, error) {
  1442. client, err := service.GetHttpClientWithProxy(proxyURL)
  1443. if err != nil {
  1444. return nil, fmt.Errorf("创建HTTP客户端失败: %v", err)
  1445. }
  1446. allModels := make([]string, 0)
  1447. nextPageToken := ""
  1448. maxPages := 100 // Safety limit to prevent infinite loops
  1449. for page := 0; page < maxPages; page++ {
  1450. url := fmt.Sprintf("%s/v1beta/models", baseURL)
  1451. if nextPageToken != "" {
  1452. url = fmt.Sprintf("%s?pageToken=%s", url, nextPageToken)
  1453. }
  1454. ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
  1455. request, err := http.NewRequestWithContext(ctx, "GET", url, nil)
  1456. if err != nil {
  1457. cancel()
  1458. return nil, fmt.Errorf("创建请求失败: %v", err)
  1459. }
  1460. request.Header.Set("x-goog-api-key", apiKey)
  1461. response, err := client.Do(request)
  1462. if err != nil {
  1463. cancel()
  1464. return nil, fmt.Errorf("请求失败: %v", err)
  1465. }
  1466. if response.StatusCode != http.StatusOK {
  1467. body, _ := io.ReadAll(response.Body)
  1468. response.Body.Close()
  1469. cancel()
  1470. return nil, fmt.Errorf("服务器返回错误 %d: %s", response.StatusCode, string(body))
  1471. }
  1472. body, err := io.ReadAll(response.Body)
  1473. response.Body.Close()
  1474. cancel()
  1475. if err != nil {
  1476. return nil, fmt.Errorf("读取响应失败: %v", err)
  1477. }
  1478. var modelsResponse GeminiModelsResponse
  1479. if err = common.Unmarshal(body, &modelsResponse); err != nil {
  1480. return nil, fmt.Errorf("解析响应失败: %v", err)
  1481. }
  1482. for _, model := range modelsResponse.Models {
  1483. modelNameValue, ok := model.Name.(string)
  1484. if !ok {
  1485. continue
  1486. }
  1487. modelName := strings.TrimPrefix(modelNameValue, "models/")
  1488. allModels = append(allModels, modelName)
  1489. }
  1490. nextPageToken = modelsResponse.NextPageToken
  1491. if nextPageToken == "" {
  1492. break
  1493. }
  1494. }
  1495. return allModels, nil
  1496. }
  1497. // convertToolChoiceToGeminiConfig converts OpenAI tool_choice to Gemini toolConfig
  1498. // OpenAI tool_choice values:
  1499. // - "auto": Let the model decide (default)
  1500. // - "none": Don't call any tools
  1501. // - "required": Must call at least one tool
  1502. // - {"type": "function", "function": {"name": "xxx"}}: Call specific function
  1503. //
  1504. // Gemini functionCallingConfig.mode values:
  1505. // - "AUTO": Model decides whether to call functions
  1506. // - "NONE": Model won't call functions
  1507. // - "ANY": Model must call at least one function
  1508. func convertToolChoiceToGeminiConfig(toolChoice any) *dto.ToolConfig {
  1509. if toolChoice == nil {
  1510. return nil
  1511. }
  1512. // Handle string values: "auto", "none", "required"
  1513. if toolChoiceStr, ok := toolChoice.(string); ok {
  1514. config := &dto.ToolConfig{
  1515. FunctionCallingConfig: &dto.FunctionCallingConfig{},
  1516. }
  1517. switch toolChoiceStr {
  1518. case "auto":
  1519. config.FunctionCallingConfig.Mode = "AUTO"
  1520. case "none":
  1521. config.FunctionCallingConfig.Mode = "NONE"
  1522. case "required":
  1523. config.FunctionCallingConfig.Mode = "ANY"
  1524. default:
  1525. // Unknown string value, default to AUTO
  1526. config.FunctionCallingConfig.Mode = "AUTO"
  1527. }
  1528. return config
  1529. }
  1530. // Handle object value: {"type": "function", "function": {"name": "xxx"}}
  1531. if toolChoiceMap, ok := toolChoice.(map[string]interface{}); ok {
  1532. if toolChoiceMap["type"] == "function" {
  1533. config := &dto.ToolConfig{
  1534. FunctionCallingConfig: &dto.FunctionCallingConfig{
  1535. Mode: "ANY",
  1536. },
  1537. }
  1538. // Extract function name if specified
  1539. if function, ok := toolChoiceMap["function"].(map[string]interface{}); ok {
  1540. if name, ok := function["name"].(string); ok && name != "" {
  1541. config.FunctionCallingConfig.AllowedFunctionNames = []string{name}
  1542. }
  1543. }
  1544. return config
  1545. }
  1546. // Unsupported map structure (type is not "function"), return nil
  1547. return nil
  1548. }
  1549. // Unsupported type, return nil
  1550. return nil
  1551. }