relay-gemini.go
package gemini

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/QuantumNous/new-api/common"
	"github.com/QuantumNous/new-api/constant"
	"github.com/QuantumNous/new-api/dto"
	"github.com/QuantumNous/new-api/logger"
	"github.com/QuantumNous/new-api/relay/channel/openai"
	relaycommon "github.com/QuantumNous/new-api/relay/common"
	"github.com/QuantumNous/new-api/relay/helper"
	"github.com/QuantumNous/new-api/service"
	"github.com/QuantumNous/new-api/setting/model_setting"
	"github.com/QuantumNous/new-api/setting/reasoning"
	"github.com/QuantumNous/new-api/types"

	"github.com/gin-gonic/gin"
	"github.com/samber/lo"
)
// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference?hl=zh-cn#blob
var geminiSupportedMimeTypes = map[string]bool{
	"application/pdf": true,
	"audio/mpeg":      true,
	"audio/mp3":       true,
	"audio/wav":       true,
	"image/png":       true,
	"image/jpeg":      true,
	"image/jpg":       true, // non-standard alias of image/jpeg, kept for compatibility
	"image/webp":      true,
	"image/heic":      true,
	"image/heif":      true,
	"text/plain":      true,
	"video/mov":       true,
	"video/mpeg":      true,
	"video/mp4":       true,
	"video/mpg":       true,
	"video/avi":       true,
	"video/wmv":       true,
	"video/mpegps":    true,
	"video/flv":       true,
}
const thoughtSignatureBypassValue = "context_engineering_is_the_way_to_go"

// Thinking-budget ranges allowed by Gemini.
const (
	pro25MinBudget       = 128
	pro25MaxBudget       = 32768
	flash25MaxBudget     = 24576
	flash25LiteMinBudget = 512
	flash25LiteMaxBudget = 24576
)
func isNew25ProModel(modelName string) bool {
	return strings.HasPrefix(modelName, "gemini-2.5-pro") &&
		!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
		!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
}

func is25FlashLiteModel(modelName string) bool {
	return strings.HasPrefix(modelName, "gemini-2.5-flash-lite")
}
// clampThinkingBudget clamps the budget to the range allowed for the given model.
func clampThinkingBudget(modelName string, budget int) int {
	isNew25Pro := isNew25ProModel(modelName)
	is25FlashLite := is25FlashLiteModel(modelName)
	if is25FlashLite {
		if budget < flash25LiteMinBudget {
			return flash25LiteMinBudget
		}
		if budget > flash25LiteMaxBudget {
			return flash25LiteMaxBudget
		}
	} else if isNew25Pro {
		if budget < pro25MinBudget {
			return pro25MinBudget
		}
		if budget > pro25MaxBudget {
			return pro25MaxBudget
		}
	} else { // other models
		if budget < 0 {
			return 0
		}
		if budget > flash25MaxBudget {
			return flash25MaxBudget
		}
	}
	return budget
}
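
// Illustrative clamping examples (a sketch, not part of the original file),
// assuming the constants defined above:
//
//	clampThinkingBudget("gemini-2.5-pro", 64)         // -> 128   (raised to the 2.5 Pro minimum)
//	clampThinkingBudget("gemini-2.5-flash", 99999)    // -> 24576 (capped at the Flash maximum)
//	clampThinkingBudget("gemini-2.5-flash-lite", 100) // -> 512   (raised to the Flash-Lite minimum)
//	clampThinkingBudget("gemini-2.0-flash", -5)       // -> 0     (other models: negative budgets become 0)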
  94. // "effort": "high" - Allocates a large portion of tokens for reasoning (approximately 80% of max_tokens)
  95. // "effort": "medium" - Allocates a moderate portion of tokens (approximately 50% of max_tokens)
  96. // "effort": "low" - Allocates a smaller portion of tokens (approximately 20% of max_tokens)
  97. // "effort": "minimal" - Allocates a minimal portion of tokens (approximately 5% of max_tokens)
  98. func clampThinkingBudgetByEffort(modelName string, effort string) int {
  99. isNew25Pro := isNew25ProModel(modelName)
  100. is25FlashLite := is25FlashLiteModel(modelName)
  101. maxBudget := 0
  102. if is25FlashLite {
  103. maxBudget = flash25LiteMaxBudget
  104. }
  105. if isNew25Pro {
  106. maxBudget = pro25MaxBudget
  107. } else {
  108. maxBudget = flash25MaxBudget
  109. }
  110. switch effort {
  111. case "high":
  112. maxBudget = maxBudget * 80 / 100
  113. case "medium":
  114. maxBudget = maxBudget * 50 / 100
  115. case "low":
  116. maxBudget = maxBudget * 20 / 100
  117. case "minimal":
  118. maxBudget = maxBudget * 5 / 100
  119. }
  120. return clampThinkingBudget(modelName, maxBudget)
  121. }
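
// Illustrative effort-to-budget mapping (a sketch, not part of the original
// file): for "gemini-2.5-pro" the maximum budget is 32768, so with integer
// division the effort levels resolve to
//
//	clampThinkingBudgetByEffort("gemini-2.5-pro", "high")    // -> 26214 (~80%)
//	clampThinkingBudgetByEffort("gemini-2.5-pro", "medium")  // -> 16384 (~50%)
//	clampThinkingBudgetByEffort("gemini-2.5-pro", "low")     // -> 6553  (~20%)
//	clampThinkingBudgetByEffort("gemini-2.5-pro", "minimal") // -> 1638  (~5%)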
func ThinkingAdaptor(geminiRequest *dto.GeminiChatRequest, info *relaycommon.RelayInfo, oaiRequest ...dto.GeneralOpenAIRequest) {
	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
		modelName := info.UpstreamModelName
		isNew25Pro := isNew25ProModel(modelName)
		if strings.Contains(modelName, "-thinking-") {
			parts := strings.SplitN(modelName, "-thinking-", 2)
			if len(parts) == 2 && parts[1] != "" {
				if budgetTokens, err := strconv.Atoi(parts[1]); err == nil {
					clampedBudget := clampThinkingBudget(modelName, budgetTokens)
					geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
						ThinkingBudget:  common.GetPointer(clampedBudget),
						IncludeThoughts: true,
					}
				}
			}
		} else if strings.HasSuffix(modelName, "-thinking") {
			unsupportedModels := []string{
				"gemini-2.5-pro-preview-05-06",
				"gemini-2.5-pro-preview-03-25",
			}
			isUnsupported := false
			for _, unsupportedModel := range unsupportedModels {
				if strings.HasPrefix(modelName, unsupportedModel) {
					isUnsupported = true
					break
				}
			}
			if isUnsupported {
				geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
					IncludeThoughts: true,
				}
			} else {
				geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
					IncludeThoughts: true,
				}
				if geminiRequest.GenerationConfig.MaxOutputTokens != nil && *geminiRequest.GenerationConfig.MaxOutputTokens > 0 {
					budgetTokens := model_setting.GetGeminiSettings().ThinkingAdapterBudgetTokensPercentage * float64(*geminiRequest.GenerationConfig.MaxOutputTokens)
					clampedBudget := clampThinkingBudget(modelName, int(budgetTokens))
					geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampedBudget)
				} else if len(oaiRequest) > 0 {
					// If a reasoning_effort parameter is present, derive the thinking budget from it.
					geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampThinkingBudgetByEffort(modelName, oaiRequest[0].ReasoningEffort))
				}
			}
		} else if strings.HasSuffix(modelName, "-nothinking") {
			if !isNew25Pro {
				geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
					ThinkingBudget: common.GetPointer(0),
				}
			}
		} else if _, level, ok := reasoning.TrimEffortSuffix(info.UpstreamModelName); ok && level != "" {
			geminiRequest.GenerationConfig.ThinkingConfig = &dto.GeminiThinkingConfig{
				IncludeThoughts: true,
				ThinkingLevel:   level,
			}
			info.ReasoningEffort = level
		}
	}
}
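
// Illustrative suffix handling (a sketch, not part of the original file),
// assuming ThinkingAdapterEnabled is on:
//
//	"gemini-2.5-flash-thinking-2048" -> ThinkingBudget 2048 (clamped), IncludeThoughts true
//	"gemini-2.5-flash-thinking"      -> budget derived from the MaxOutputTokens percentage,
//	                                    or from reasoning_effort when max tokens is unset
//	"gemini-2.5-flash-nothinking"    -> ThinkingBudget 0 (thinking disabled)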
// Setting safety to the lowest possible values since Gemini is already powerless enough
func CovertOpenAI2Gemini(c *gin.Context, textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*dto.GeminiChatRequest, error) {
	geminiRequest := dto.GeminiChatRequest{
		Contents: make([]dto.GeminiChatContent, 0, len(textRequest.Messages)),
		GenerationConfig: dto.GeminiChatGenerationConfig{
			Temperature: textRequest.Temperature,
		},
	}
	if textRequest.TopP != nil && *textRequest.TopP > 0 {
		geminiRequest.GenerationConfig.TopP = common.GetPointer(*textRequest.TopP)
	}
	if maxTokens := textRequest.GetMaxTokens(); maxTokens > 0 {
		geminiRequest.GenerationConfig.MaxOutputTokens = common.GetPointer(maxTokens)
	}
	if textRequest.Seed != nil && *textRequest.Seed != 0 {
		geminiSeed := int64(lo.FromPtr(textRequest.Seed))
		geminiRequest.GenerationConfig.Seed = common.GetPointer(geminiSeed)
	}
	attachThoughtSignature := (info.ChannelType == constant.ChannelTypeGemini ||
		info.ChannelType == constant.ChannelTypeVertexAi) &&
		model_setting.GetGeminiSettings().FunctionCallThoughtSignatureEnabled
	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
		geminiRequest.GenerationConfig.ResponseModalities = []string{
			"TEXT",
			"IMAGE",
		}
	}
	if stopSequences := parseStopSequences(textRequest.Stop); len(stopSequences) > 0 {
		// Gemini supports up to 5 stop sequences
		if len(stopSequences) > 5 {
			stopSequences = stopSequences[:5]
		}
		geminiRequest.GenerationConfig.StopSequences = stopSequences
	}
	adaptorWithExtraBody := false
	// patch extra_body
	if len(textRequest.ExtraBody) > 0 {
		var extraBody map[string]interface{}
		if err := common.Unmarshal(textRequest.ExtraBody, &extraBody); err != nil {
			return nil, fmt.Errorf("invalid extra body: %w", err)
		}
		// e.g. {"google":{"thinking_config":{"thinking_budget":5324,"include_thoughts":true}}}
		if googleBody, ok := extraBody["google"].(map[string]interface{}); ok {
			if !strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
				adaptorWithExtraBody = true
				// reject the misspelled param name thinkingConfig; it should be thinking_config
				if _, hasErrorParam := googleBody["thinkingConfig"]; hasErrorParam {
					return nil, errors.New("extra_body.google.thinkingConfig is not supported, use extra_body.google.thinking_config instead")
				}
				if thinkingConfig, ok := googleBody["thinking_config"].(map[string]interface{}); ok {
					// reject the misspelled param name thinkingBudget; it should be thinking_budget
					if _, hasErrorParam := thinkingConfig["thinkingBudget"]; hasErrorParam {
						return nil, errors.New("extra_body.google.thinking_config.thinkingBudget is not supported, use extra_body.google.thinking_config.thinking_budget instead")
					}
					var hasThinkingConfig bool
					var tempThinkingConfig dto.GeminiThinkingConfig
					if thinkingBudget, exists := thinkingConfig["thinking_budget"]; exists {
						switch v := thinkingBudget.(type) {
						case float64:
							budgetInt := int(v)
							tempThinkingConfig.ThinkingBudget = common.GetPointer(budgetInt)
							if budgetInt > 0 {
								// positive budget: enable thoughts
								tempThinkingConfig.IncludeThoughts = true
							} else {
								// present but zero or negative: disable thinking
								tempThinkingConfig.IncludeThoughts = false
							}
							hasThinkingConfig = true
						default:
							return nil, errors.New("extra_body.google.thinking_config.thinking_budget must be an integer")
						}
					}
					if includeThoughts, exists := thinkingConfig["include_thoughts"]; exists {
						if v, ok := includeThoughts.(bool); ok {
							tempThinkingConfig.IncludeThoughts = v
							hasThinkingConfig = true
						} else {
							return nil, errors.New("extra_body.google.thinking_config.include_thoughts must be a boolean")
						}
					}
					if thinkingLevel, exists := thinkingConfig["thinking_level"]; exists {
						if v, ok := thinkingLevel.(string); ok {
							tempThinkingConfig.ThinkingLevel = v
							hasThinkingConfig = true
						} else {
							return nil, errors.New("extra_body.google.thinking_config.thinking_level must be a string")
						}
					}
					if hasThinkingConfig {
						// Avoid a nil-pointer panic: only allocate when a config was actually provided.
						if geminiRequest.GenerationConfig.ThinkingConfig == nil {
							geminiRequest.GenerationConfig.ThinkingConfig = &tempThinkingConfig
						} else {
							// Already allocated: merge the fields.
							if tempThinkingConfig.ThinkingBudget != nil {
								geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = tempThinkingConfig.ThinkingBudget
							}
							geminiRequest.GenerationConfig.ThinkingConfig.IncludeThoughts = tempThinkingConfig.IncludeThoughts
							if tempThinkingConfig.ThinkingLevel != "" {
								geminiRequest.GenerationConfig.ThinkingConfig.ThinkingLevel = tempThinkingConfig.ThinkingLevel
							}
						}
					}
				}
			}
			// reject the misspelled param name imageConfig; it should be image_config
			if _, hasErrorParam := googleBody["imageConfig"]; hasErrorParam {
				return nil, errors.New("extra_body.google.imageConfig is not supported, use extra_body.google.image_config instead")
			}
			if imageConfig, ok := googleBody["image_config"].(map[string]interface{}); ok {
				// reject the misspelled param name aspectRatio; it should be aspect_ratio
				if _, hasErrorParam := imageConfig["aspectRatio"]; hasErrorParam {
					return nil, errors.New("extra_body.google.image_config.aspectRatio is not supported, use extra_body.google.image_config.aspect_ratio instead")
				}
				// reject the misspelled param name imageSize; it should be image_size
				if _, hasErrorParam := imageConfig["imageSize"]; hasErrorParam {
					return nil, errors.New("extra_body.google.image_config.imageSize is not supported, use extra_body.google.image_config.image_size instead")
				}
				// convert snake_case to camelCase for the Gemini API
				geminiImageConfig := make(map[string]interface{})
				if aspectRatio, ok := imageConfig["aspect_ratio"]; ok {
					geminiImageConfig["aspectRatio"] = aspectRatio
				}
				if imageSize, ok := imageConfig["image_size"]; ok {
					geminiImageConfig["imageSize"] = imageSize
				}
				if len(geminiImageConfig) > 0 {
					imageConfigBytes, err := common.Marshal(geminiImageConfig)
					if err != nil {
						return nil, fmt.Errorf("failed to marshal image_config: %w", err)
					}
					geminiRequest.GenerationConfig.ImageConfig = imageConfigBytes
				}
			}
		}
	}
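	// Illustrative extra_body payload (a sketch, not part of the original file)
	// showing what the block above accepts:
	//
	//	{"google": {"thinking_config": {"thinking_budget": 1024, "include_thoughts": true},
	//	            "image_config":    {"aspect_ratio": "16:9"}}}
	//
	// This yields ThinkingConfig{ThinkingBudget: 1024, IncludeThoughts: true} and
	// GenerationConfig.ImageConfig = {"aspectRatio":"16:9"}.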
	if !adaptorWithExtraBody {
		ThinkingAdaptor(&geminiRequest, info, textRequest)
	}
	safetySettings := make([]dto.GeminiChatSafetySettings, 0, len(SafetySettingList))
	for _, category := range SafetySettingList {
		safetySettings = append(safetySettings, dto.GeminiChatSafetySettings{
			Category:  category,
			Threshold: model_setting.GetGeminiSafetySetting(category),
		})
	}
	geminiRequest.SafetySettings = safetySettings
	if textRequest.Tools != nil {
		functions := make([]dto.FunctionRequest, 0, len(textRequest.Tools))
		googleSearch := false
		codeExecution := false
		urlContext := false
		for _, tool := range textRequest.Tools {
			if tool.Function.Name == "googleSearch" {
				googleSearch = true
				continue
			}
			if tool.Function.Name == "codeExecution" {
				codeExecution = true
				continue
			}
			if tool.Function.Name == "urlContext" {
				urlContext = true
				continue
			}
			if tool.Function.Parameters != nil {
				params, ok := tool.Function.Parameters.(map[string]interface{})
				if ok {
					if props, hasProps := params["properties"].(map[string]interface{}); hasProps {
						if len(props) == 0 {
							tool.Function.Parameters = nil
						}
					}
				}
			}
			// Clean the parameters before appending
			cleanedParams := cleanFunctionParameters(tool.Function.Parameters)
			tool.Function.Parameters = cleanedParams
			functions = append(functions, tool.Function)
		}
		geminiTools := geminiRequest.GetTools()
		if codeExecution {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				CodeExecution: make(map[string]string),
			})
		}
		if googleSearch {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				GoogleSearch: make(map[string]string),
			})
		}
		if urlContext {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				URLContext: make(map[string]string),
			})
		}
		if len(functions) > 0 {
			geminiTools = append(geminiTools, dto.GeminiChatTool{
				FunctionDeclarations: functions,
			})
		}
		geminiRequest.SetTools(geminiTools)
		// Convert OpenAI tool_choice to Gemini toolConfig.functionCallingConfig
		// Mapping: "auto" -> "AUTO", "none" -> "NONE", "required" -> "ANY"
		// Object format: {"type": "function", "function": {"name": "xxx"}} -> "ANY" + allowedFunctionNames
		if textRequest.ToolChoice != nil {
			geminiRequest.ToolConfig = convertToolChoiceToGeminiConfig(textRequest.ToolChoice)
		}
	}
	if textRequest.ResponseFormat != nil && (textRequest.ResponseFormat.Type == "json_schema" || textRequest.ResponseFormat.Type == "json_object") {
		geminiRequest.GenerationConfig.ResponseMimeType = "application/json"
		if len(textRequest.ResponseFormat.JsonSchema) > 0 {
			// Parse the json.RawMessage first.
			var jsonSchema dto.FormatJsonSchema
			if err := common.Unmarshal(textRequest.ResponseFormat.JsonSchema, &jsonSchema); err == nil {
				cleanedSchema := removeAdditionalPropertiesWithDepth(jsonSchema.Schema, 0)
				geminiRequest.GenerationConfig.ResponseSchema = cleanedSchema
			}
		}
	}
	tool_call_ids := make(map[string]string)
	var system_content []string
	for _, message := range textRequest.Messages {
		if message.Role == "system" || message.Role == "developer" {
			system_content = append(system_content, message.StringContent())
			continue
		} else if message.Role == "tool" || message.Role == "function" {
			if len(geminiRequest.Contents) == 0 || geminiRequest.Contents[len(geminiRequest.Contents)-1].Role == "model" {
				geminiRequest.Contents = append(geminiRequest.Contents, dto.GeminiChatContent{
					Role: "user",
				})
			}
			var parts = &geminiRequest.Contents[len(geminiRequest.Contents)-1].Parts
			name := ""
			if message.Name != nil {
				name = *message.Name
			} else if val, exists := tool_call_ids[message.ToolCallId]; exists {
				name = val
			}
			var contentMap map[string]interface{}
			contentStr := message.StringContent()
			// 1. Try to parse the content as a JSON object.
			if err := json.Unmarshal([]byte(contentStr), &contentMap); err != nil {
				// 2. If that fails, try to parse it as a JSON array.
				var contentSlice []interface{}
				if err := json.Unmarshal([]byte(contentStr), &contentSlice); err == nil {
					// It is an array: wrap it in an object.
					contentMap = map[string]interface{}{"result": contentSlice}
				} else {
					// 3. If that also fails, treat it as plain text.
					contentMap = map[string]interface{}{"content": contentStr}
				}
			}
			functionResp := &dto.GeminiFunctionResponse{
				Name:     name,
				Response: contentMap,
			}
			*parts = append(*parts, dto.GeminiPart{
				FunctionResponse: functionResp,
			})
			continue
		}
		var parts []dto.GeminiPart
		content := dto.GeminiChatContent{
			Role: message.Role,
		}
		shouldAttachThoughtSignature := attachThoughtSignature && (message.Role == "assistant" || message.Role == "model")
		signatureAttached := false
		if message.ToolCalls != nil {
			for _, call := range message.ParseToolCalls() {
				args := map[string]interface{}{}
				if call.Function.Arguments != "" {
					if json.Unmarshal([]byte(call.Function.Arguments), &args) != nil {
						return nil, fmt.Errorf("invalid arguments for function %s, args: %s", call.Function.Name, call.Function.Arguments)
					}
				}
				toolCall := dto.GeminiPart{
					FunctionCall: &dto.FunctionCall{
						FunctionName: call.Function.Name,
						Arguments:    args,
					},
				}
				if shouldAttachThoughtSignature && !signatureAttached && hasFunctionCallContent(toolCall.FunctionCall) && len(toolCall.ThoughtSignature) == 0 {
					toolCall.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					signatureAttached = true
				}
				parts = append(parts, toolCall)
				tool_call_ids[call.ID] = call.Function.Name
			}
		}
		openaiContent := message.ParseContent()
		for _, part := range openaiContent {
			if part.Type == dto.ContentTypeText {
				if part.Text == "" {
					continue
				}
				// Detect markdown images like ![image](data:image/jpeg;base64,xxxxxxxxxxxx).
				// Use string search instead of a regex to avoid performance problems on large text.
				text := part.Text
				hasMarkdownImage := false
				for {
					// quick check for a markdown image marker
					startIdx := strings.Index(text, "![")
					if startIdx == -1 {
						break
					}
					// find "](data:"
					bracketIdx := strings.Index(text[startIdx:], "](data:")
					if bracketIdx == -1 {
						break
					}
					bracketIdx += startIdx
					// find the closing ")"
					closeIdx := strings.Index(text[bracketIdx+2:], ")")
					if closeIdx == -1 {
						break
					}
					closeIdx += bracketIdx + 2
					hasMarkdownImage = true
					// append the text preceding the image
					if startIdx > 0 {
						textBefore := text[:startIdx]
						if textBefore != "" {
							parts = append(parts, dto.GeminiPart{
								Text: textBefore,
							})
						}
					}
					// extract the data URL (between "](" and ")")
					dataUrl := text[bracketIdx+2 : closeIdx]
					format, base64String, err := service.DecodeBase64FileData(dataUrl)
					if err != nil {
						return nil, fmt.Errorf("decode markdown base64 image data failed: %s", err.Error())
					}
					imgPart := dto.GeminiPart{
						InlineData: &dto.GeminiInlineData{
							MimeType: format,
							Data:     base64String,
						},
					}
					if shouldAttachThoughtSignature {
						imgPart.ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					}
					parts = append(parts, imgPart)
					// continue with the remaining text
					text = text[closeIdx+1:]
				}
				// Append the remaining text, or the original text if no markdown image was found.
				if hasMarkdownImage {
					if text != "" {
						parts = append(parts, dto.GeminiPart{
							Text: text,
						})
					}
				} else {
					parts = append(parts, dto.GeminiPart{
						Text: part.Text,
					})
				}
			} else {
				source := part.ToFileSource()
				if source == nil {
					continue
				}
				base64Data, mimeType, err := service.GetBase64Data(c, source, "formatting image for Gemini")
				if err != nil {
					return nil, fmt.Errorf("get file data from '%s' failed: %w", source.GetIdentifier(), err)
				}
				// Verify the MIME type is on Gemini's supported whitelist.
				if _, ok := geminiSupportedMimeTypes[strings.ToLower(mimeType)]; !ok {
					return nil, fmt.Errorf("mime type is not supported by Gemini: '%s', url: '%s', supported types are: %v", mimeType, source.GetIdentifier(), getSupportedMimeTypesList())
				}
				parts = append(parts, dto.GeminiPart{
					InlineData: &dto.GeminiInlineData{
						MimeType: mimeType,
						Data:     base64Data,
					},
				})
			}
		}
		// If a signature should be attached but none has been yet (no tool_calls,
		// or empty tool_calls), attach the thoughtSignature to the first text part.
		if shouldAttachThoughtSignature && !signatureAttached && len(parts) > 0 {
			for i := range parts {
				if parts[i].Text != "" {
					parts[i].ThoughtSignature = json.RawMessage(strconv.Quote(thoughtSignatureBypassValue))
					break
				}
			}
		}
		content.Parts = parts
		// Gemini has no assistant role; the API rejects any role other than "user" or "model".
		if content.Role == "assistant" {
			content.Role = "model"
		}
		if len(content.Parts) > 0 {
			geminiRequest.Contents = append(geminiRequest.Contents, content)
		}
	}
	if len(system_content) > 0 {
		geminiRequest.SystemInstructions = &dto.GeminiChatContent{
			Parts: []dto.GeminiPart{
				{
					Text: strings.Join(system_content, "\n"),
				},
			},
		}
	}
	return &geminiRequest, nil
}
// parseStopSequences parses stop sequences, accepting a single string or an array of strings.
func parseStopSequences(stop any) []string {
	if stop == nil {
		return nil
	}
	switch v := stop.(type) {
	case string:
		if v != "" {
			return []string{v}
		}
	case []string:
		return v
	case []interface{}:
		sequences := make([]string, 0, len(v))
		for _, item := range v {
			if str, ok := item.(string); ok && str != "" {
				sequences = append(sequences, str)
			}
		}
		return sequences
	}
	return nil
}
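
// Illustrative inputs (a sketch, not part of the original file):
//
//	parseStopSequences("END")                     // []string{"END"}
//	parseStopSequences([]interface{}{"a", "", 1}) // []string{"a"} (empty strings and non-strings are dropped)
//	parseStopSequences(nil)                       // nil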
func hasFunctionCallContent(call *dto.FunctionCall) bool {
	if call == nil {
		return false
	}
	if strings.TrimSpace(call.FunctionName) != "" {
		return true
	}
	switch v := call.Arguments.(type) {
	case nil:
		return false
	case string:
		return strings.TrimSpace(v) != ""
	case map[string]interface{}:
		return len(v) > 0
	case []interface{}:
		return len(v) > 0
	default:
		return true
	}
}
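
// Illustrative checks (a sketch, not part of the original file):
//
//	hasFunctionCallContent(nil)                                                    // false
//	hasFunctionCallContent(&dto.FunctionCall{FunctionName: "get_weather"})         // true
//	hasFunctionCallContent(&dto.FunctionCall{Arguments: map[string]interface{}{}}) // false (no name, empty args)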
// getSupportedMimeTypesList returns the supported MIME types for error messages.
func getSupportedMimeTypesList() []string {
	keys := make([]string, 0, len(geminiSupportedMimeTypes))
	for k := range geminiSupportedMimeTypes {
		keys = append(keys, k)
	}
	return keys
}

var geminiOpenAPISchemaAllowedFields = map[string]struct{}{
	"anyOf":            {},
	"default":          {},
	"description":      {},
	"enum":             {},
	"example":          {},
	"format":           {},
	"items":            {},
	"maxItems":         {},
	"maxLength":        {},
	"maxProperties":    {},
	"maximum":          {},
	"minItems":         {},
	"minLength":        {},
	"minProperties":    {},
	"minimum":          {},
	"nullable":         {},
	"pattern":          {},
	"properties":       {},
	"propertyOrdering": {},
	"required":         {},
	"title":            {},
	"type":             {},
}

const geminiFunctionSchemaMaxDepth = 64
// cleanFunctionParameters recursively removes unsupported fields from Gemini function parameters.
func cleanFunctionParameters(params interface{}) interface{} {
	return cleanFunctionParametersWithDepth(params, 0)
}

func cleanFunctionParametersWithDepth(params interface{}, depth int) interface{} {
	if params == nil {
		return nil
	}
	if depth >= geminiFunctionSchemaMaxDepth {
		return cleanFunctionParametersShallow(params)
	}
	switch v := params.(type) {
	case map[string]interface{}:
		// Keep only Gemini-supported OpenAPI schema subset fields (per official SDK Schema).
		cleanedMap := make(map[string]interface{}, len(v))
		for k, val := range v {
			if _, ok := geminiOpenAPISchemaAllowedFields[k]; ok {
				cleanedMap[k] = val
			}
		}
		normalizeGeminiSchemaTypeAndNullable(cleanedMap)
		// Clean properties
		if props, ok := cleanedMap["properties"].(map[string]interface{}); ok && props != nil {
			cleanedProps := make(map[string]interface{})
			for propName, propValue := range props {
				cleanedProps[propName] = cleanFunctionParametersWithDepth(propValue, depth+1)
			}
			cleanedMap["properties"] = cleanedProps
		}
		// Recursively clean items in arrays
		if items, ok := cleanedMap["items"].(map[string]interface{}); ok && items != nil {
			cleanedMap["items"] = cleanFunctionParametersWithDepth(items, depth+1)
		}
		// OpenAPI tuple-style items is not supported by the Gemini SDK Schema; keep the first to avoid API rejection.
		if itemsArray, ok := cleanedMap["items"].([]interface{}); ok && len(itemsArray) > 0 {
			cleanedMap["items"] = cleanFunctionParametersWithDepth(itemsArray[0], depth+1)
		}
		// Recursively clean anyOf
		if nested, ok := cleanedMap["anyOf"].([]interface{}); ok && nested != nil {
			cleanedNested := make([]interface{}, len(nested))
			for i, item := range nested {
				cleanedNested[i] = cleanFunctionParametersWithDepth(item, depth+1)
			}
			cleanedMap["anyOf"] = cleanedNested
		}
		return cleanedMap
	case []interface{}:
		// Handle arrays of schemas
		cleanedArray := make([]interface{}, len(v))
		for i, item := range v {
			cleanedArray[i] = cleanFunctionParametersWithDepth(item, depth+1)
		}
		return cleanedArray
	default:
		// Not a map or array; return as is (e.g. a primitive).
		return params
	}
}
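
// Illustrative cleaning (a sketch, not part of the original file): unsupported
// schema keys are dropped and type names are upper-cased, so
//
//	{"type": "object", "additionalProperties": false,
//	 "properties": {"q": {"type": "string", "examples": ["x"]}}}
//
// becomes
//
//	{"type": "OBJECT", "properties": {"q": {"type": "STRING"}}}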
func cleanFunctionParametersShallow(params interface{}) interface{} {
	switch v := params.(type) {
	case map[string]interface{}:
		cleanedMap := make(map[string]interface{}, len(v))
		for k, val := range v {
			if _, ok := geminiOpenAPISchemaAllowedFields[k]; ok {
				cleanedMap[k] = val
			}
		}
		normalizeGeminiSchemaTypeAndNullable(cleanedMap)
		// Stop recursion and avoid retaining huge nested structures.
		delete(cleanedMap, "properties")
		delete(cleanedMap, "items")
		delete(cleanedMap, "anyOf")
		return cleanedMap
	case []interface{}:
		// Prefer an empty list over deep recursion on attacker-controlled inputs.
		return []interface{}{}
	default:
		return params
	}
}
func normalizeGeminiSchemaTypeAndNullable(schema map[string]interface{}) {
	rawType, ok := schema["type"]
	if !ok || rawType == nil {
		return
	}
	normalize := func(t string) (string, bool) {
		switch strings.ToLower(strings.TrimSpace(t)) {
		case "object":
			return "OBJECT", false
		case "array":
			return "ARRAY", false
		case "string":
			return "STRING", false
		case "integer":
			return "INTEGER", false
		case "number":
			return "NUMBER", false
		case "boolean":
			return "BOOLEAN", false
		case "null":
			return "", true
		default:
			return t, false
		}
	}
	switch t := rawType.(type) {
	case string:
		normalized, isNull := normalize(t)
		if isNull {
			schema["nullable"] = true
			delete(schema, "type")
			return
		}
		schema["type"] = normalized
	case []interface{}:
		nullable := false
		var chosen string
		for _, item := range t {
			if s, ok := item.(string); ok {
				normalized, isNull := normalize(s)
				if isNull {
					nullable = true
					continue
				}
				if chosen == "" {
					chosen = normalized
				}
			}
		}
		if nullable {
			schema["nullable"] = true
		}
		if chosen != "" {
			schema["type"] = chosen
		} else {
			delete(schema, "type")
		}
	}
}
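
// Illustrative normalization (a sketch, not part of the original file):
//
//	{"type": "integer"}          -> {"type": "INTEGER"}
//	{"type": ["string", "null"]} -> {"type": "STRING", "nullable": true}
//	{"type": "null"}             -> {"nullable": true} (the type key is removed)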
func removeAdditionalPropertiesWithDepth(schema interface{}, depth int) interface{} {
	if depth >= 5 {
		return schema
	}
	v, ok := schema.(map[string]interface{})
	if !ok || len(v) == 0 {
		return schema
	}
	// Drop all title and $schema fields.
	delete(v, "title")
	delete(v, "$schema")
	// If type is neither object nor array, return as is.
	if typeVal, exists := v["type"]; !exists || (typeVal != "object" && typeVal != "array") {
		return schema
	}
	switch v["type"] {
	case "object":
		delete(v, "additionalProperties")
		// process properties
		if properties, ok := v["properties"].(map[string]interface{}); ok {
			for key, value := range properties {
				properties[key] = removeAdditionalPropertiesWithDepth(value, depth+1)
			}
		}
		for _, field := range []string{"allOf", "anyOf", "oneOf"} {
			if nested, ok := v[field].([]interface{}); ok {
				for i, item := range nested {
					nested[i] = removeAdditionalPropertiesWithDepth(item, depth+1)
				}
			}
		}
	case "array":
		if items, ok := v["items"].(map[string]interface{}); ok {
			v["items"] = removeAdditionalPropertiesWithDepth(items, depth+1)
		}
	}
	return v
}
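
// Illustrative response-schema cleanup (a sketch, not part of the original
// file): title/$schema are always dropped, and additionalProperties is removed
// from object schemas (recursing at most 5 levels deep), so
//
//	{"type": "object", "title": "T", "additionalProperties": false,
//	 "properties": {"a": {"type": "string", "title": "A"}}}
//
// becomes
//
//	{"type": "object", "properties": {"a": {"type": "string"}}}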
func unescapeString(s string) (string, error) {
	var result []rune
	escaped := false
	i := 0
	for i < len(s) {
		r, size := utf8.DecodeRuneInString(s[i:]) // decode the next UTF-8 character
		// A RuneError with size 1 signals a genuine decoding failure
		// (a literal U+FFFD in the input decodes with size 3).
		if r == utf8.RuneError && size == 1 {
			return "", fmt.Errorf("invalid UTF-8 encoding")
		}
		if escaped {
			// The character follows an escape backslash: handle it by type.
			switch r {
			case '"':
				result = append(result, '"')
			case '\\':
				result = append(result, '\\')
			case '/':
				result = append(result, '/')
			case 'b':
				result = append(result, '\b')
			case 'f':
				result = append(result, '\f')
			case 'n':
				result = append(result, '\n')
			case 'r':
				result = append(result, '\r')
			case 't':
				result = append(result, '\t')
			case '\'':
				result = append(result, '\'')
			default:
				// Unknown escape sequence: emit it verbatim.
				result = append(result, '\\', r)
			}
			escaped = false
		} else {
			if r == '\\' {
				escaped = true // remember the backslash as an escape marker
			} else {
				result = append(result, r)
			}
		}
		i += size // advance to the next character
	}
	return string(result), nil
}
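
// Illustrative unescaping (a sketch, not part of the original file):
//
//	unescapeString(`say \"hi\"\n`) // -> say "hi" followed by a newline
//	unescapeString(`a\qb`)         // -> a\qb (unknown escapes are kept verbatim)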
func unescapeMapOrSlice(data interface{}) interface{} {
	switch v := data.(type) {
	case map[string]interface{}:
		for k, val := range v {
			v[k] = unescapeMapOrSlice(val)
		}
	case []interface{}:
		for i, val := range v {
			v[i] = unescapeMapOrSlice(val)
		}
	case string:
		if unescaped, err := unescapeString(v); err != nil {
			return v
		} else {
			return unescaped
		}
	}
	return data
}
func getResponseToolCall(item *dto.GeminiPart) *dto.ToolCallResponse {
	// Marshal the arguments directly; JSON (de)serialization already handles
	// escaping correctly, so no extra unescaping pass is needed.
	argsBytes, err := json.Marshal(item.FunctionCall.Arguments)
	if err != nil {
		return nil
	}
	return &dto.ToolCallResponse{
		ID:   fmt.Sprintf("call_%s", common.GetUUID()),
		Type: "function",
		Function: dto.FunctionResponse{
			Arguments: string(argsBytes),
			Name:      item.FunctionCall.FunctionName,
		},
	}
}
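
// Illustrative conversion (a sketch, not part of the original file): a part
// with FunctionCall{FunctionName: "get_weather", Arguments: map[string]interface{}{"city": "Paris"}}
// becomes
//
//	&dto.ToolCallResponse{
//		ID:       "call_<uuid>",
//		Type:     "function",
//		Function: dto.FunctionResponse{Name: "get_weather", Arguments: `{"city":"Paris"}`},
//	}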
func buildUsageFromGeminiMetadata(metadata dto.GeminiUsageMetadata, fallbackPromptTokens int) dto.Usage {
	promptTokens := metadata.PromptTokenCount + metadata.ToolUsePromptTokenCount
	if promptTokens <= 0 && fallbackPromptTokens > 0 {
		promptTokens = fallbackPromptTokens
	}
	usage := dto.Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: metadata.CandidatesTokenCount + metadata.ThoughtsTokenCount,
		TotalTokens:      metadata.TotalTokenCount,
	}
	usage.CompletionTokenDetails.ReasoningTokens = metadata.ThoughtsTokenCount
	usage.PromptTokensDetails.CachedTokens = metadata.CachedContentTokenCount
	for _, detail := range metadata.PromptTokensDetails {
		if detail.Modality == "AUDIO" {
			usage.PromptTokensDetails.AudioTokens += detail.TokenCount
		} else if detail.Modality == "TEXT" {
			usage.PromptTokensDetails.TextTokens += detail.TokenCount
		}
	}
	for _, detail := range metadata.ToolUsePromptTokensDetails {
		if detail.Modality == "AUDIO" {
			usage.PromptTokensDetails.AudioTokens += detail.TokenCount
		} else if detail.Modality == "TEXT" {
			usage.PromptTokensDetails.TextTokens += detail.TokenCount
		}
	}
	for _, detail := range metadata.CandidatesTokensDetails {
		switch detail.Modality {
		case "IMAGE":
			usage.CompletionTokenDetails.ImageTokens += detail.TokenCount
		case "AUDIO":
			usage.CompletionTokenDetails.AudioTokens += detail.TokenCount
		case "TEXT":
			usage.CompletionTokenDetails.TextTokens += detail.TokenCount
		}
	}
	if usage.TotalTokens > 0 && usage.CompletionTokens <= 0 {
		usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	}
	if usage.PromptTokens > 0 && usage.PromptTokensDetails.TextTokens == 0 && usage.PromptTokensDetails.AudioTokens == 0 {
		usage.PromptTokensDetails.TextTokens = usage.PromptTokens
	}
	return usage
}
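
// Illustrative mapping (a sketch, not part of the original file): metadata with
// PromptTokenCount 10, CandidatesTokenCount 20, ThoughtsTokenCount 5 and
// TotalTokenCount 35 yields
//
//	PromptTokens 10, CompletionTokens 25 (20 + 5 thoughts), TotalTokens 35,
//	CompletionTokenDetails.ReasoningTokens 5, PromptTokensDetails.TextTokens 10
//	(the fallback when no per-modality details are present)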
func responseGeminiChat2OpenAI(c *gin.Context, response *dto.GeminiChatResponse) *dto.OpenAITextResponse {
	fullTextResponse := dto.OpenAITextResponse{
		Id:      helper.GetResponseID(c),
		Object:  "chat.completion",
		Created: common.GetTimestamp(),
		Choices: make([]dto.OpenAITextResponseChoice, 0, len(response.Candidates)),
	}
	isToolCall := false
	for _, candidate := range response.Candidates {
		choice := dto.OpenAITextResponseChoice{
			Index: int(candidate.Index),
			Message: dto.Message{
				Role:    "assistant",
				Content: "",
			},
			FinishReason: constant.FinishReasonStop,
		}
		if len(candidate.Content.Parts) > 0 {
			var texts []string
			var toolCalls []dto.ToolCallResponse
			for _, part := range candidate.Content.Parts {
				if part.InlineData != nil {
					// media content
					if strings.HasPrefix(part.InlineData.MimeType, "image") {
						imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
						texts = append(texts, imgText)
					} else {
						// other media types: render as a data link
						texts = append(texts, fmt.Sprintf("[media](data:%s;base64,%s)", part.InlineData.MimeType, part.InlineData.Data))
					}
				} else if part.FunctionCall != nil {
					choice.FinishReason = constant.FinishReasonToolCalls
					if call := getResponseToolCall(&part); call != nil {
						toolCalls = append(toolCalls, *call)
					}
				} else if part.Thought {
					// Copy the text so the pointer does not alias the loop variable.
					thoughtText := part.Text
					choice.Message.ReasoningContent = &thoughtText
				} else {
					if part.ExecutableCode != nil {
						texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```")
					} else if part.CodeExecutionResult != nil {
						texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```")
					} else {
						// skip blank lines
						if part.Text != "\n" {
							texts = append(texts, part.Text)
						}
					}
				}
			}
			if len(toolCalls) > 0 {
				choice.Message.SetToolCalls(toolCalls)
				isToolCall = true
			}
			choice.Message.SetStringContent(strings.Join(texts, "\n"))
		}
		if candidate.FinishReason != nil {
			switch *candidate.FinishReason {
			case "STOP":
				choice.FinishReason = constant.FinishReasonStop
			case "MAX_TOKENS":
				choice.FinishReason = constant.FinishReasonLength
			default:
				// SAFETY, RECITATION, BLOCKLIST, PROHIBITED_CONTENT, SPII, OTHER and
				// any unknown reason all map to OpenAI's content_filter.
				choice.FinishReason = constant.FinishReasonContentFilter
			}
		}
		if isToolCall {
			choice.FinishReason = constant.FinishReasonToolCalls
		}
		fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
	}
	return &fullTextResponse
}
func streamResponseGeminiChat2OpenAI(geminiResponse *dto.GeminiChatResponse) (*dto.ChatCompletionsStreamResponse, bool) {
	choices := make([]dto.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates))
	isStop := false
	for _, candidate := range geminiResponse.Candidates {
		if candidate.FinishReason != nil && *candidate.FinishReason == "STOP" {
			isStop = true
			candidate.FinishReason = nil
		}
		choice := dto.ChatCompletionsStreamResponseChoice{
			Index: int(candidate.Index),
			Delta: dto.ChatCompletionsStreamResponseChoiceDelta{},
		}
		var texts []string
		isTools := false
		isThought := false
		if candidate.FinishReason != nil {
			// Map the Gemini FinishReason to an OpenAI finish_reason.
			switch *candidate.FinishReason {
			case "STOP":
				choice.FinishReason = &constant.FinishReasonStop
			case "MAX_TOKENS":
				choice.FinishReason = &constant.FinishReasonLength
			default:
				// SAFETY, RECITATION, BLOCKLIST, PROHIBITED_CONTENT, SPII, OTHER and
				// any unknown reason all map to OpenAI's content_filter.
				choice.FinishReason = &constant.FinishReasonContentFilter
			}
		}
		for _, part := range candidate.Content.Parts {
			if part.InlineData != nil {
				if strings.HasPrefix(part.InlineData.MimeType, "image") {
					imgText := "![image](data:" + part.InlineData.MimeType + ";base64," + part.InlineData.Data + ")"
					texts = append(texts, imgText)
				}
			} else if part.FunctionCall != nil {
				isTools = true
				if call := getResponseToolCall(&part); call != nil {
					call.SetIndex(len(choice.Delta.ToolCalls))
					choice.Delta.ToolCalls = append(choice.Delta.ToolCalls, *call)
				}
			} else if part.Thought {
				isThought = true
				texts = append(texts, part.Text)
			} else {
				if part.ExecutableCode != nil {
					texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```\n")
				} else if part.CodeExecutionResult != nil {
					texts = append(texts, "```output\n"+part.CodeExecutionResult.Output+"\n```\n")
				} else {
					if part.Text != "\n" {
						texts = append(texts, part.Text)
					}
				}
			}
		}
		if isThought {
			choice.Delta.SetReasoningContent(strings.Join(texts, "\n"))
		} else {
			choice.Delta.SetContentString(strings.Join(texts, "\n"))
		}
		if isTools {
			choice.FinishReason = &constant.FinishReasonToolCalls
		}
		choices = append(choices, choice)
	}
	var response dto.ChatCompletionsStreamResponse
	response.Object = "chat.completion.chunk"
	response.Choices = choices
	return &response, isStop
}
func handleStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
	streamData, err := common.Marshal(resp)
	if err != nil {
		return fmt.Errorf("failed to marshal stream response: %w", err)
	}
	err = openai.HandleStreamFormat(c, info, string(streamData), info.ChannelSetting.ForceFormat, info.ChannelSetting.ThinkingToContent)
	if err != nil {
		return fmt.Errorf("failed to handle stream format: %w", err)
	}
	return nil
}

func handleFinalStream(c *gin.Context, info *relaycommon.RelayInfo, resp *dto.ChatCompletionsStreamResponse) error {
	streamData, err := common.Marshal(resp)
	if err != nil {
		return fmt.Errorf("failed to marshal stream response: %w", err)
	}
	openai.HandleFinalResponse(c, info, string(streamData), resp.Id, resp.Created, resp.Model, resp.GetSystemFingerprint(), resp.Usage, false)
	return nil
}
func geminiStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response, callback func(data string, geminiResponse *dto.GeminiChatResponse) bool) (*dto.Usage, *types.NewAPIError) {
	var usage = &dto.Usage{}
	var imageCount int
	responseText := strings.Builder{}
	helper.StreamScannerHandler(c, resp, info, func(data string, sr *helper.StreamResult) {
		var geminiResponse dto.GeminiChatResponse
		if err := common.UnmarshalJsonStr(data, &geminiResponse); err != nil {
			sr.Stop(fmt.Errorf("unmarshal: %w", err))
			return
		}
		if len(geminiResponse.Candidates) == 0 && geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, fmt.Sprintf("gemini_block_reason=%s", *geminiResponse.PromptFeedback.BlockReason))
		}
		// count generated images
		for _, candidate := range geminiResponse.Candidates {
			for _, part := range candidate.Content.Parts {
				if part.InlineData != nil && part.InlineData.MimeType != "" {
					imageCount++
				}
				if part.Text != "" {
					responseText.WriteString(part.Text)
				}
			}
		}
		// update usage statistics
		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
			mappedUsage := buildUsageFromGeminiMetadata(geminiResponse.UsageMetadata, info.GetEstimatePromptTokens())
			*usage = mappedUsage
		}
		if !callback(data, &geminiResponse) {
			sr.Stop(fmt.Errorf("gemini callback stopped"))
		}
	})
	if imageCount != 0 && usage.CompletionTokens == 0 {
		usage.CompletionTokens = imageCount * 1400
	}
	if usage.CompletionTokens <= 0 {
		if info.ReceivedResponseCount > 0 {
			usage = service.ResponseText2Usage(c, responseText.String(), info.UpstreamModelName, info.GetEstimatePromptTokens())
		} else {
			usage = &dto.Usage{}
		}
	}
	return usage, nil
}
func GeminiChatStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	id := helper.GetResponseID(c)
	createAt := common.GetTimestamp()
	finishReason := constant.FinishReasonStop
	toolCallIndexByChoice := make(map[int]map[string]int)
	nextToolCallIndexByChoice := make(map[int]int)
	usage, err := geminiStreamHandler(c, info, resp, func(data string, geminiResponse *dto.GeminiChatResponse) bool {
		response, isStop := streamResponseGeminiChat2OpenAI(geminiResponse)
		response.Id = id
		response.Created = createAt
		response.Model = info.UpstreamModelName
		// Assign stable per-choice tool-call indices keyed by tool-call ID.
		for choiceIdx := range response.Choices {
			choiceKey := response.Choices[choiceIdx].Index
			for toolIdx := range response.Choices[choiceIdx].Delta.ToolCalls {
				tool := &response.Choices[choiceIdx].Delta.ToolCalls[toolIdx]
				if tool.ID == "" {
					continue
				}
				m := toolCallIndexByChoice[choiceKey]
				if m == nil {
					m = make(map[string]int)
					toolCallIndexByChoice[choiceKey] = m
				}
				if idx, ok := m[tool.ID]; ok {
					tool.SetIndex(idx)
					continue
				}
				idx := nextToolCallIndexByChoice[choiceKey]
				nextToolCallIndexByChoice[choiceKey] = idx + 1
				m[tool.ID] = idx
				tool.SetIndex(idx)
			}
		}
		logger.LogDebug(c, fmt.Sprintf("info.SendResponseCount = %d", info.SendResponseCount))
		if info.SendResponseCount == 0 {
			// send the first response
			emptyResponse := helper.GenerateStartEmptyResponse(id, createAt, info.UpstreamModelName, nil)
			if response.IsToolCall() {
				if len(emptyResponse.Choices) > 0 && len(response.Choices) > 0 {
					toolCalls := response.Choices[0].Delta.ToolCalls
					copiedToolCalls := make([]dto.ToolCallResponse, len(toolCalls))
					for idx := range toolCalls {
						copiedToolCalls[idx] = toolCalls[idx]
						copiedToolCalls[idx].Function.Arguments = ""
					}
					emptyResponse.Choices[0].Delta.ToolCalls = copiedToolCalls
				}
				finishReason = constant.FinishReasonToolCalls
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
				response.ClearToolCalls()
				if response.IsFinished() {
					response.Choices[0].FinishReason = nil
				}
			} else {
				err := handleStream(c, info, emptyResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
			}
		}
		err := handleStream(c, info, response)
		if err != nil {
			logger.LogError(c, err.Error())
		}
		if isStop {
			_ = handleStream(c, info, helper.GenerateStopResponse(id, createAt, info.UpstreamModelName, finishReason))
		}
		return true
	})
	if err != nil {
		return usage, err
	}
	response := helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
	handleErr := handleFinalStream(c, info, response)
	if handleErr != nil {
		common.SysLog("send final response failed: " + handleErr.Error())
	}
	return usage, nil
}
func GeminiChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	// Defer the close so the body is released even when reading fails.
	defer service.CloseResponseBodyGracefully(resp)
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	if common.DebugEnabled {
		println(string(responseBody))
	}
	var geminiResponse dto.GeminiChatResponse
	err = common.Unmarshal(responseBody, &geminiResponse)
	if err != nil {
		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	if len(geminiResponse.Candidates) == 0 {
		// No candidates: either the prompt was blocked or the upstream returned
		// an empty response. Report the reason and still account for usage.
		usage := buildUsageFromGeminiMetadata(geminiResponse.UsageMetadata, info.GetEstimatePromptTokens())
		var newAPIError *types.NewAPIError
		if geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, fmt.Sprintf("gemini_block_reason=%s", *geminiResponse.PromptFeedback.BlockReason))
			newAPIError = types.NewOpenAIError(
				errors.New("request blocked by Gemini API: "+*geminiResponse.PromptFeedback.BlockReason),
				types.ErrorCodePromptBlocked,
				http.StatusBadRequest,
			)
		} else {
			common.SetContextKey(c, constant.ContextKeyAdminRejectReason, "gemini_empty_candidates")
			newAPIError = types.NewOpenAIError(
				errors.New("empty response from Gemini API"),
				types.ErrorCodeEmptyResponse,
				http.StatusInternalServerError,
			)
		}
		service.ResetStatusCode(newAPIError, c.GetString("status_code_mapping"))
		switch info.RelayFormat {
		case types.RelayFormatClaude:
			c.JSON(newAPIError.StatusCode, gin.H{
				"type":  "error",
				"error": newAPIError.ToClaudeError(),
			})
		default:
			c.JSON(newAPIError.StatusCode, gin.H{
				"error": newAPIError.ToOpenAIError(),
			})
		}
		return &usage, nil
	}
	fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
	fullTextResponse.Model = info.UpstreamModelName
	usage := buildUsageFromGeminiMetadata(geminiResponse.UsageMetadata, info.GetEstimatePromptTokens())
	fullTextResponse.Usage = usage
	switch info.RelayFormat {
	case types.RelayFormatOpenAI:
		responseBody, err = common.Marshal(fullTextResponse)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
	case types.RelayFormatClaude:
		claudeResp := service.ResponseOpenAI2Claude(fullTextResponse, info)
		claudeRespStr, err := common.Marshal(claudeResp)
		if err != nil {
			return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
		}
		responseBody = claudeRespStr
	case types.RelayFormatGemini:
		// passthrough: forward the upstream Gemini body unchanged
	}
	service.IOCopyBytesGracefully(c, resp, responseBody)
	return &usage, nil
}
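// GeminiEmbeddingHandler converts a Gemini batch embedding response into the
// OpenAI embedding list format; usage is billed by estimated input tokens.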
func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	defer service.CloseResponseBodyGracefully(resp)
	responseBody, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	var geminiResponse dto.GeminiBatchEmbeddingResponse
	if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
		return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	// convert to OpenAI format response
	openAIResponse := dto.OpenAIEmbeddingResponse{
		Object: "list",
		Data:   make([]dto.OpenAIEmbeddingResponseItem, 0, len(geminiResponse.Embeddings)),
		Model:  info.UpstreamModelName,
	}
	for i, embedding := range geminiResponse.Embeddings {
		openAIResponse.Data = append(openAIResponse.Data, dto.OpenAIEmbeddingResponseItem{
			Object:    "embedding",
			Embedding: embedding.Values,
			Index:     i,
		})
	}
	// Calculate usage. Google has not clarified how embedding models are billed
	// (https://ai.google.dev/gemini-api/docs/pricing?hl=zh-cn#text-embedding-004),
	// so follow OpenAI's convention and bill by input tokens
	// (https://platform.openai.com/docs/guides/embeddings#what-are-embeddings).
	usage := service.ResponseText2Usage(c, "", info.UpstreamModelName, info.GetEstimatePromptTokens())
	openAIResponse.Usage = *usage
	jsonResponse, jsonErr := common.Marshal(openAIResponse)
	if jsonErr != nil {
		return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	service.IOCopyBytesGracefully(c, resp, jsonResponse)
	return usage, nil
}
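// GeminiImageHandler converts a Gemini image (Imagen-style predictions)
// response into the OpenAI image format, skipping RAI-filtered predictions,
// and charges a fixed token count per generated image.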
func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	// Defer the close so the body is released even when reading fails.
	defer service.CloseResponseBodyGracefully(resp)
	responseBody, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	var geminiResponse dto.GeminiImageResponse
	if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
		return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	if len(geminiResponse.Predictions) == 0 {
		return nil, types.NewOpenAIError(errors.New("no images generated"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
	}
	// convert to OpenAI format response
	openAIResponse := dto.ImageResponse{
		Created: common.GetTimestamp(),
		Data:    make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
	}
	for _, prediction := range geminiResponse.Predictions {
		if prediction.RaiFilteredReason != "" {
			continue // skip images filtered by Responsible AI checks
		}
		openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
			B64Json: prediction.BytesBase64Encoded,
		})
	}
	jsonResponse, jsonErr := json.Marshal(openAIResponse)
	if jsonErr != nil {
		return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, _ = c.Writer.Write(jsonResponse)
	// Each image counts as a fixed 258 tokens, per
	// https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
	const imageTokens = 258
	generatedImages := len(openAIResponse.Data)
	usage := &dto.Usage{
		PromptTokens:     imageTokens * generatedImages, // 258 tokens per generated image
		CompletionTokens: 0,                             // image generation has no completion tokens
		TotalTokens:      imageTokens * generatedImages,
	}
	return usage, nil
}
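// GeminiModelsResponse is one page of the Gemini models list endpoint
// (GET /v1beta/models), with nextPageToken set when more pages follow.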
type GeminiModelsResponse struct {
	Models        []dto.GeminiModel `json:"models"`
	NextPageToken string            `json:"nextPageToken"`
}
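// FetchGeminiModels lists the model IDs available to the given API key by
// paging through /v1beta/models (bounded by a safety page limit) and stripping
// the "models/" prefix from each returned name.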
func FetchGeminiModels(baseURL, apiKey, proxyURL string) ([]string, error) {
	client, err := service.GetHttpClientWithProxy(proxyURL)
	if err != nil {
		return nil, fmt.Errorf("failed to create HTTP client: %v", err)
	}
	allModels := make([]string, 0)
	nextPageToken := ""
	maxPages := 100 // safety limit to prevent infinite pagination loops
	for page := 0; page < maxPages; page++ {
		url := fmt.Sprintf("%s/v1beta/models", baseURL)
		if nextPageToken != "" {
			url = fmt.Sprintf("%s?pageToken=%s", url, nextPageToken)
		}
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		request, err := http.NewRequestWithContext(ctx, "GET", url, nil)
		if err != nil {
			cancel()
			return nil, fmt.Errorf("failed to create request: %v", err)
		}
		request.Header.Set("x-goog-api-key", apiKey)
		response, err := client.Do(request)
		if err != nil {
			cancel()
			return nil, fmt.Errorf("request failed: %v", err)
		}
		if response.StatusCode != http.StatusOK {
			body, _ := io.ReadAll(response.Body)
			response.Body.Close()
			cancel()
			return nil, fmt.Errorf("server returned error %d: %s", response.StatusCode, string(body))
		}
		body, err := io.ReadAll(response.Body)
		response.Body.Close()
		cancel()
		if err != nil {
			return nil, fmt.Errorf("failed to read response: %v", err)
		}
		var modelsResponse GeminiModelsResponse
		if err = common.Unmarshal(body, &modelsResponse); err != nil {
			return nil, fmt.Errorf("failed to parse response: %v", err)
		}
		for _, model := range modelsResponse.Models {
			modelNameValue, ok := model.Name.(string)
			if !ok {
				continue
			}
			modelName := strings.TrimPrefix(modelNameValue, "models/")
			allModels = append(allModels, modelName)
		}
		nextPageToken = modelsResponse.NextPageToken
		if nextPageToken == "" {
			break
		}
	}
	return allModels, nil
}
// convertToolChoiceToGeminiConfig converts OpenAI tool_choice to Gemini toolConfig.
// OpenAI tool_choice values:
//   - "auto": Let the model decide (default)
//   - "none": Don't call any tools
//   - "required": Must call at least one tool
//   - {"type": "function", "function": {"name": "xxx"}}: Call specific function
//
// Gemini functionCallingConfig.mode values:
//   - "AUTO": Model decides whether to call functions
//   - "NONE": Model won't call functions
//   - "ANY": Model must call at least one function
func convertToolChoiceToGeminiConfig(toolChoice any) *dto.ToolConfig {
	if toolChoice == nil {
		return nil
	}
	// Handle string values: "auto", "none", "required"
	if toolChoiceStr, ok := toolChoice.(string); ok {
		config := &dto.ToolConfig{
			FunctionCallingConfig: &dto.FunctionCallingConfig{},
		}
		switch toolChoiceStr {
		case "auto":
			config.FunctionCallingConfig.Mode = "AUTO"
		case "none":
			config.FunctionCallingConfig.Mode = "NONE"
		case "required":
			config.FunctionCallingConfig.Mode = "ANY"
		default:
			// Unknown string value, default to AUTO
			config.FunctionCallingConfig.Mode = "AUTO"
		}
		return config
	}
	// Handle object value: {"type": "function", "function": {"name": "xxx"}}
	if toolChoiceMap, ok := toolChoice.(map[string]any); ok {
		if toolChoiceMap["type"] == "function" {
			config := &dto.ToolConfig{
				FunctionCallingConfig: &dto.FunctionCallingConfig{
					Mode: "ANY",
				},
			}
			// Extract function name if specified
			if function, ok := toolChoiceMap["function"].(map[string]any); ok {
				if name, ok := function["name"].(string); ok && name != "" {
					config.FunctionCallingConfig.AllowedFunctionNames = []string{name}
				}
			}
			return config
		}
		// Unsupported map structure (type is not "function"), return nil
		return nil
	}
	// Unsupported type, return nil
	return nil
}