// distributor.go
  1. package middleware
  2. import (
  3. "errors"
  4. "fmt"
  5. "net/http"
  6. "one-api/common"
  7. "one-api/constant"
  8. "one-api/dto"
  9. "one-api/model"
  10. relayconstant "one-api/relay/constant"
  11. "one-api/service"
  12. "one-api/setting"
  13. "one-api/setting/ratio_setting"
  14. "strconv"
  15. "strings"
  16. "time"
  17. "github.com/gin-gonic/gin"
  18. )
  19. type ModelRequest struct {
  20. Model string `json:"model"`
  21. }
  22. func Distribute() func(c *gin.Context) {
  23. return func(c *gin.Context) {
  24. allowIpsMap := c.GetStringMap("allow_ips")
  25. if len(allowIpsMap) != 0 {
  26. clientIp := c.ClientIP()
  27. if _, ok := allowIpsMap[clientIp]; !ok {
  28. abortWithOpenAiMessage(c, http.StatusForbidden, "您的 IP 不在令牌允许访问的列表中")
  29. return
  30. }
  31. }
  32. var channel *model.Channel
  33. channelId, ok := c.Get("specific_channel_id")
  34. modelRequest, shouldSelectChannel, err := getModelRequest(c)
  35. if err != nil {
  36. abortWithOpenAiMessage(c, http.StatusBadRequest, "Invalid request, "+err.Error())
  37. return
  38. }
  39. userGroup := c.GetString(constant.ContextKeyUserGroup)
  40. tokenGroup := c.GetString("token_group")
  41. if tokenGroup != "" {
  42. // check common.UserUsableGroups[userGroup]
  43. if _, ok := setting.GetUserUsableGroups(userGroup)[tokenGroup]; !ok {
  44. abortWithOpenAiMessage(c, http.StatusForbidden, fmt.Sprintf("令牌分组 %s 已被禁用", tokenGroup))
  45. return
  46. }
  47. // check group in common.GroupRatio
  48. if !ratio_setting.ContainsGroupRatio(tokenGroup) {
  49. if tokenGroup != "auto" {
  50. abortWithOpenAiMessage(c, http.StatusForbidden, fmt.Sprintf("分组 %s 已被弃用", tokenGroup))
  51. return
  52. }
  53. }
  54. userGroup = tokenGroup
  55. }
  56. c.Set("group", userGroup)
  57. if ok {
  58. id, err := strconv.Atoi(channelId.(string))
  59. if err != nil {
  60. abortWithOpenAiMessage(c, http.StatusBadRequest, "无效的渠道 Id")
  61. return
  62. }
  63. channel, err = model.GetChannelById(id, true)
  64. if err != nil {
  65. abortWithOpenAiMessage(c, http.StatusBadRequest, "无效的渠道 Id")
  66. return
  67. }
  68. if channel.Status != common.ChannelStatusEnabled {
  69. abortWithOpenAiMessage(c, http.StatusForbidden, "该渠道已被禁用")
  70. return
  71. }
  72. } else {
  73. // Select a channel for the user
  74. // check token model mapping
  75. modelLimitEnable := c.GetBool("token_model_limit_enabled")
  76. if modelLimitEnable {
  77. s, ok := c.Get("token_model_limit")
  78. var tokenModelLimit map[string]bool
  79. if ok {
  80. tokenModelLimit = s.(map[string]bool)
  81. } else {
  82. tokenModelLimit = map[string]bool{}
  83. }
  84. if tokenModelLimit != nil {
  85. if _, ok := tokenModelLimit[modelRequest.Model]; !ok {
  86. abortWithOpenAiMessage(c, http.StatusForbidden, "该令牌无权访问模型 "+modelRequest.Model)
  87. return
  88. }
  89. } else {
  90. // token model limit is empty, all models are not allowed
  91. abortWithOpenAiMessage(c, http.StatusForbidden, "该令牌无权访问任何模型")
  92. return
  93. }
  94. }
  95. if shouldSelectChannel {
  96. var selectGroup string
  97. channel, selectGroup, err = model.CacheGetRandomSatisfiedChannel(c, userGroup, modelRequest.Model, 0)
  98. if err != nil {
  99. showGroup := userGroup
  100. if userGroup == "auto" {
  101. showGroup = fmt.Sprintf("auto(%s)", selectGroup)
  102. }
  103. message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", showGroup, modelRequest.Model)
  104. // 如果错误,但是渠道不为空,说明是数据库一致性问题
  105. if channel != nil {
  106. common.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
  107. message = "数据库一致性已被破坏,请联系管理员"
  108. }
  109. // 如果错误,而且渠道为空,说明是没有可用渠道
  110. abortWithOpenAiMessage(c, http.StatusServiceUnavailable, message)
  111. return
  112. }
  113. if channel == nil {
  114. abortWithOpenAiMessage(c, http.StatusServiceUnavailable, fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道(数据库一致性已被破坏)", userGroup, modelRequest.Model))
  115. return
  116. }
  117. }
  118. }
  119. c.Set(constant.ContextKeyRequestStartTime, time.Now())
  120. SetupContextForSelectedChannel(c, channel, modelRequest.Model)
  121. c.Next()
  122. }
  123. }
  124. func getModelRequest(c *gin.Context) (*ModelRequest, bool, error) {
  125. var modelRequest ModelRequest
  126. shouldSelectChannel := true
  127. var err error
  128. if strings.Contains(c.Request.URL.Path, "/mj/") {
  129. relayMode := relayconstant.Path2RelayModeMidjourney(c.Request.URL.Path)
  130. if relayMode == relayconstant.RelayModeMidjourneyTaskFetch ||
  131. relayMode == relayconstant.RelayModeMidjourneyTaskFetchByCondition ||
  132. relayMode == relayconstant.RelayModeMidjourneyNotify ||
  133. relayMode == relayconstant.RelayModeMidjourneyTaskImageSeed {
  134. shouldSelectChannel = false
  135. } else {
  136. midjourneyRequest := dto.MidjourneyRequest{}
  137. err = common.UnmarshalBodyReusable(c, &midjourneyRequest)
  138. if err != nil {
  139. return nil, false, err
  140. }
  141. midjourneyModel, mjErr, success := service.GetMjRequestModel(relayMode, &midjourneyRequest)
  142. if mjErr != nil {
  143. return nil, false, fmt.Errorf(mjErr.Description)
  144. }
  145. if midjourneyModel == "" {
  146. if !success {
  147. return nil, false, fmt.Errorf("无效的请求, 无法解析模型")
  148. } else {
  149. // task fetch, task fetch by condition, notify
  150. shouldSelectChannel = false
  151. }
  152. }
  153. modelRequest.Model = midjourneyModel
  154. }
  155. c.Set("relay_mode", relayMode)
  156. } else if strings.Contains(c.Request.URL.Path, "/suno/") {
  157. relayMode := relayconstant.Path2RelaySuno(c.Request.Method, c.Request.URL.Path)
  158. if relayMode == relayconstant.RelayModeSunoFetch ||
  159. relayMode == relayconstant.RelayModeSunoFetchByID {
  160. shouldSelectChannel = false
  161. } else {
  162. modelName := service.CoverTaskActionToModelName(constant.TaskPlatformSuno, c.Param("action"))
  163. modelRequest.Model = modelName
  164. }
  165. c.Set("platform", string(constant.TaskPlatformSuno))
  166. c.Set("relay_mode", relayMode)
  167. } else if strings.HasPrefix(c.Request.URL.Path, "/v1beta/models/") {
  168. // Gemini API 路径处理: /v1beta/models/gemini-2.0-flash:generateContent
  169. relayMode := relayconstant.RelayModeGemini
  170. modelName := extractModelNameFromGeminiPath(c.Request.URL.Path)
  171. if modelName != "" {
  172. modelRequest.Model = modelName
  173. }
  174. c.Set("relay_mode", relayMode)
  175. } else if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") && !strings.HasPrefix(c.Request.URL.Path, "/v1/images/edits") {
  176. err = common.UnmarshalBodyReusable(c, &modelRequest)
  177. }
  178. if err != nil {
  179. return nil, false, errors.New("无效的请求, " + err.Error())
  180. }
  181. if strings.HasPrefix(c.Request.URL.Path, "/v1/realtime") {
  182. //wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01
  183. modelRequest.Model = c.Query("model")
  184. }
  185. if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
  186. if modelRequest.Model == "" {
  187. modelRequest.Model = "text-moderation-stable"
  188. }
  189. }
  190. if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
  191. if modelRequest.Model == "" {
  192. modelRequest.Model = c.Param("model")
  193. }
  194. }
  195. if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
  196. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "dall-e")
  197. } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/edits") {
  198. modelRequest.Model = common.GetStringIfEmpty(c.PostForm("model"), "gpt-image-1")
  199. }
  200. if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
  201. relayMode := relayconstant.RelayModeAudioSpeech
  202. if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
  203. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "tts-1")
  204. } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
  205. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, c.PostForm("model"))
  206. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "whisper-1")
  207. relayMode = relayconstant.RelayModeAudioTranslation
  208. } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
  209. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, c.PostForm("model"))
  210. modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "whisper-1")
  211. relayMode = relayconstant.RelayModeAudioTranscription
  212. }
  213. c.Set("relay_mode", relayMode)
  214. }
  215. return &modelRequest, shouldSelectChannel, nil
  216. }
  217. func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
  218. c.Set("original_model", modelName) // for retry
  219. if channel == nil {
  220. return
  221. }
  222. c.Set("channel_id", channel.Id)
  223. c.Set("channel_name", channel.Name)
  224. c.Set("channel_type", channel.Type)
  225. c.Set("channel_create_time", channel.CreatedTime)
  226. c.Set("channel_setting", channel.GetSetting())
  227. c.Set("param_override", channel.GetParamOverride())
  228. if nil != channel.OpenAIOrganization && "" != *channel.OpenAIOrganization {
  229. c.Set("channel_organization", *channel.OpenAIOrganization)
  230. }
  231. c.Set("auto_ban", channel.GetAutoBan())
  232. c.Set("model_mapping", channel.GetModelMapping())
  233. c.Set("status_code_mapping", channel.GetStatusCodeMapping())
  234. c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
  235. c.Set("base_url", channel.GetBaseURL())
  236. // TODO: api_version统一
  237. switch channel.Type {
  238. case common.ChannelTypeAzure:
  239. c.Set("api_version", channel.Other)
  240. case common.ChannelTypeVertexAi:
  241. c.Set("region", channel.Other)
  242. case common.ChannelTypeXunfei:
  243. c.Set("api_version", channel.Other)
  244. case common.ChannelTypeGemini:
  245. c.Set("api_version", channel.Other)
  246. case common.ChannelTypeAli:
  247. c.Set("plugin", channel.Other)
  248. case common.ChannelCloudflare:
  249. c.Set("api_version", channel.Other)
  250. case common.ChannelTypeMokaAI:
  251. c.Set("api_version", channel.Other)
  252. case common.ChannelTypeCoze:
  253. c.Set("bot_id", channel.Other)
  254. }
  255. }
  256. // extractModelNameFromGeminiPath 从 Gemini API URL 路径中提取模型名
  257. // 输入格式: /v1beta/models/gemini-2.0-flash:generateContent
  258. // 输出: gemini-2.0-flash
  259. func extractModelNameFromGeminiPath(path string) string {
  260. // 查找 "/models/" 的位置
  261. modelsPrefix := "/models/"
  262. modelsIndex := strings.Index(path, modelsPrefix)
  263. if modelsIndex == -1 {
  264. return ""
  265. }
  266. // 从 "/models/" 之后开始提取
  267. startIndex := modelsIndex + len(modelsPrefix)
  268. if startIndex >= len(path) {
  269. return ""
  270. }
  271. // 查找 ":" 的位置,模型名在 ":" 之前
  272. colonIndex := strings.Index(path[startIndex:], ":")
  273. if colonIndex == -1 {
  274. // 如果没有找到 ":",返回从 "/models/" 到路径结尾的部分
  275. return path[startIndex:]
  276. }
  277. // 返回模型名部分
  278. return path[startIndex : startIndex+colonIndex]
  279. }