relay-text.go

package controller

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "one-api/common"
    "one-api/model"
    "strings"
    "time"

    "github.com/gin-gonic/gin"
)
const (
    APITypeOpenAI = iota
    APITypeClaude
    APITypePaLM
    APITypeBaidu
    APITypeZhipu
    APITypeAli
    APITypeXunfei
    APITypeAIProxyLibrary
    APITypeTencent
    APITypeGemini
)
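// httpClient performs relayed upstream requests, honoring RelayTimeout when
// configured; impatientHTTPClient is for calls that must fail fast (5-second timeout).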
var httpClient *http.Client
var impatientHTTPClient *http.Client

func init() {
    if common.RelayTimeout == 0 {
        httpClient = &http.Client{}
    } else {
        httpClient = &http.Client{
            Timeout: time.Duration(common.RelayTimeout) * time.Second,
        }
    }
    impatientHTTPClient = &http.Client{
        Timeout: 5 * time.Second,
    }
}
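// relayTextHelper relays a text-style request (chat completion, completion,
// embedding, moderation, or edit): it validates the request, applies model
// mapping, builds the provider-specific URL and body, pre-consumes quota,
// forwards the request, and settles the final usage after the response.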
func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    channelType := c.GetInt("channel")
    channelId := c.GetInt("channel_id")
    tokenId := c.GetInt("token_id")
    userId := c.GetInt("id")
    group := c.GetString("group")
    tokenUnlimited := c.GetBool("token_unlimited_quota")
    startTime := time.Now()
    var textRequest GeneralOpenAIRequest
    err := common.UnmarshalBodyReusable(c, &textRequest)
    if err != nil {
        return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
    }
    if relayMode == RelayModeModerations && textRequest.Model == "" {
        textRequest.Model = "text-moderation-latest"
    }
    if relayMode == RelayModeEmbeddings && textRequest.Model == "" {
        textRequest.Model = c.Param("model")
    }
    // request validation
    if textRequest.Model == "" {
        return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)
    }
    switch relayMode {
    case RelayModeCompletions:
        if textRequest.Prompt == "" {
            return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
        }
    case RelayModeChatCompletions:
        if len(textRequest.Messages) == 0 {
            return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
        }
    case RelayModeEmbeddings:
    case RelayModeModerations:
        if textRequest.Input == "" {
            return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
        }
    case RelayModeEdits:
        if textRequest.Instruction == "" {
            return errorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
        }
    }
    // map model name
    modelMapping := c.GetString("model_mapping")
    isModelMapped := false
    if modelMapping != "" && modelMapping != "{}" {
        modelMap := make(map[string]string)
        err := json.Unmarshal([]byte(modelMapping), &modelMap)
        if err != nil {
            return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
        }
        if modelMap[textRequest.Model] != "" {
            textRequest.Model = modelMap[textRequest.Model]
            isModelMapped = true
        }
    }
    apiType := APITypeOpenAI
    switch channelType {
    case common.ChannelTypeAnthropic:
        apiType = APITypeClaude
    case common.ChannelTypeBaidu:
        apiType = APITypeBaidu
    case common.ChannelTypePaLM:
        apiType = APITypePaLM
    case common.ChannelTypeZhipu:
        apiType = APITypeZhipu
    case common.ChannelTypeAli:
        apiType = APITypeAli
    case common.ChannelTypeXunfei:
        apiType = APITypeXunfei
    case common.ChannelTypeAIProxyLibrary:
        apiType = APITypeAIProxyLibrary
    case common.ChannelTypeTencent:
        apiType = APITypeTencent
    case common.ChannelTypeGemini:
        apiType = APITypeGemini
    }
    baseURL := common.ChannelBaseURLs[channelType]
    requestURL := c.Request.URL.String()
    if c.GetString("base_url") != "" {
        baseURL = c.GetString("base_url")
    }
    fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
    switch apiType {
    case APITypeOpenAI:
        if channelType == common.ChannelTypeAzure {
            // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
            query := c.Request.URL.Query()
            apiVersion := query.Get("api-version")
            if apiVersion == "" {
                apiVersion = c.GetString("api_version")
            }
            requestURL := strings.Split(requestURL, "?")[0]
            requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
            baseURL = c.GetString("base_url")
            task := strings.TrimPrefix(requestURL, "/v1/")
            model_ := textRequest.Model
            model_ = strings.Replace(model_, ".", "", -1)
            // https://github.com/songquanpeng/one-api/issues/67
            model_ = strings.TrimSuffix(model_, "-0301")
            model_ = strings.TrimSuffix(model_, "-0314")
            model_ = strings.TrimSuffix(model_, "-0613")
            requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
            fullRequestURL = getFullRequestURL(baseURL, requestURL, channelType)
        }
    case APITypeClaude:
        fullRequestURL = "https://api.anthropic.com/v1/complete"
        if baseURL != "" {
            fullRequestURL = fmt.Sprintf("%s/v1/complete", baseURL)
        }
    case APITypeBaidu:
        switch textRequest.Model {
        case "ERNIE-Bot":
            fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
        case "ERNIE-Bot-turbo":
            fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
        case "ERNIE-Bot-4":
            fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
        case "BLOOMZ-7B":
            fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1"
        case "Embedding-V1":
            fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"
        }
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        var err error
        if apiKey, err = getBaiduAccessToken(apiKey); err != nil {
            return errorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError)
        }
        fullRequestURL += "?access_token=" + apiKey
    case APITypePaLM:
        fullRequestURL = "https://generativelanguage.googleapis.com/v1beta2/models/chat-bison-001:generateMessage"
        if baseURL != "" {
            fullRequestURL = fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", baseURL)
        }
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        fullRequestURL += "?key=" + apiKey
    case APITypeGemini:
        requestBaseURL := "https://generativelanguage.googleapis.com"
        if baseURL != "" {
            requestBaseURL = baseURL
        }
        version := "v1beta"
        if c.GetString("api_version") != "" {
            version = c.GetString("api_version")
        }
        action := "generateContent"
        if textRequest.Stream {
            action = "streamGenerateContent"
        }
        fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        fullRequestURL += "?key=" + apiKey
        //log.Println(fullRequestURL)
    case APITypeZhipu:
        method := "invoke"
        if textRequest.Stream {
            method = "sse-invoke"
        }
        fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
    case APITypeAli:
        fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
        if relayMode == RelayModeEmbeddings {
            fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
        }
    case APITypeTencent:
        fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
    case APITypeAIProxyLibrary:
        fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
    }
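    // Estimate prompt tokens locally so quota can be pre-consumed before the upstream call.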
    var promptTokens int
    var completionTokens int
    switch relayMode {
    case RelayModeChatCompletions:
        promptTokens, err = countTokenMessages(textRequest.Messages, textRequest.Model)
        if err != nil {
            return errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
        }
    case RelayModeCompletions:
        promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
    case RelayModeModerations:
        promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
    }
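    // A model price of -1 means no fixed per-call price is configured for this
    // model, so billing falls back to per-token ratios (model ratio x group ratio).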
    modelPrice := common.GetModelPrice(textRequest.Model, false)
    groupRatio := common.GetGroupRatio(group)
    var preConsumedQuota int
    var ratio float64
    var modelRatio float64
    if modelPrice == -1 {
        preConsumedTokens := common.PreConsumedQuota
        if textRequest.MaxTokens != 0 {
            preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
        }
        modelRatio = common.GetModelRatio(textRequest.Model)
        ratio = modelRatio * groupRatio
        preConsumedQuota = int(float64(preConsumedTokens) * ratio)
    } else {
        preConsumedQuota = int(modelPrice * common.QuotaPerUnit * groupRatio)
    }
    userQuota, err := model.CacheGetUserQuota(userId)
    if err != nil {
        return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota < 0 || userQuota-preConsumedQuota < 0 {
        return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
    }
    err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
    if err != nil {
        return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota > 100*preConsumedQuota {
        // the user has ample quota; check whether the token's quota is also sufficient
        if !tokenUnlimited {
            // not an unlimited token: check the token's own quota
            tokenQuota := c.GetInt("token_quota")
            if tokenQuota > 100*preConsumedQuota {
                // the token quota is ample too, so trust it and skip pre-consumption
                preConsumedQuota = 0
                common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d quota %d and token %d quota %d are enough, trusted and no need to pre-consume", userId, userQuota, tokenId, tokenQuota))
            }
        } else {
            // in this case, we do not pre-consume quota
            // because the user has enough quota
            preConsumedQuota = 0
            common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d with unlimited token has enough quota %d, trusted and no need to pre-consume", userId, userQuota))
        }
    }
    if preConsumedQuota > 0 {
        userQuota, err = model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
        if err != nil {
            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
        }
    }
    var requestBody io.Reader
    if isModelMapped {
        jsonStr, err := json.Marshal(textRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    } else {
        requestBody = c.Request.Body
    }
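    // Convert the OpenAI-format request into the upstream provider's native schema where needed.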
    switch apiType {
    case APITypeClaude:
        claudeRequest := requestOpenAI2Claude(textRequest)
        jsonStr, err := json.Marshal(claudeRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeBaidu:
        var jsonData []byte
        var err error
        switch relayMode {
        case RelayModeEmbeddings:
            baiduEmbeddingRequest := embeddingRequestOpenAI2Baidu(textRequest)
            jsonData, err = json.Marshal(baiduEmbeddingRequest)
        default:
            baiduRequest := requestOpenAI2Baidu(textRequest)
            jsonData, err = json.Marshal(baiduRequest)
        }
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonData)
    case APITypePaLM:
        palmRequest := requestOpenAI2PaLM(textRequest)
        jsonStr, err := json.Marshal(palmRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeGemini:
        geminiChatRequest := requestOpenAI2Gemini(textRequest)
        jsonStr, err := json.Marshal(geminiChatRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeZhipu:
        zhipuRequest := requestOpenAI2Zhipu(textRequest)
        jsonStr, err := json.Marshal(zhipuRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeAli:
        var jsonStr []byte
        var err error
        switch relayMode {
        case RelayModeEmbeddings:
            aliEmbeddingRequest := embeddingRequestOpenAI2Ali(textRequest)
            jsonStr, err = json.Marshal(aliEmbeddingRequest)
        default:
            aliRequest := requestOpenAI2Ali(textRequest)
            jsonStr, err = json.Marshal(aliRequest)
        }
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeTencent:
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        appId, secretId, secretKey, err := parseTencentConfig(apiKey)
        if err != nil {
            return errorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError)
        }
        tencentRequest := requestOpenAI2Tencent(textRequest)
        tencentRequest.AppId = appId
        tencentRequest.SecretId = secretId
        jsonStr, err := json.Marshal(tencentRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        sign := getTencentSign(*tencentRequest, secretKey)
        c.Request.Header.Set("Authorization", sign)
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeAIProxyLibrary:
        aiProxyLibraryRequest := requestOpenAI2AIProxyLibrary(textRequest)
        aiProxyLibraryRequest.LibraryId = c.GetString("library_id")
        jsonStr, err := json.Marshal(aiProxyLibraryRequest)
        if err != nil {
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    }
    var req *http.Request
    var resp *http.Response
    isStream := textRequest.Stream
    if apiType != APITypeXunfei { // Xunfei is handled over WebSocket instead
        req, err = http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
        if err != nil {
            return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
        }
        // Set GetBody so the request body can be replayed (e.g. on redirects);
        // note this only yields the original data if requestBody has not been consumed yet.
        req.GetBody = func() (io.ReadCloser, error) {
            return io.NopCloser(requestBody), nil
        }
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        switch apiType {
        case APITypeOpenAI:
            if channelType == common.ChannelTypeAzure {
                req.Header.Set("api-key", apiKey)
            } else {
                req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
                if c.Request.Header.Get("OpenAI-Organization") != "" {
                    req.Header.Set("OpenAI-Organization", c.Request.Header.Get("OpenAI-Organization"))
                }
                if channelType == common.ChannelTypeOpenRouter {
                    req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
                    req.Header.Set("X-Title", "One API")
                }
            }
        case APITypeClaude:
            req.Header.Set("x-api-key", apiKey)
            anthropicVersion := c.Request.Header.Get("anthropic-version")
            if anthropicVersion == "" {
                anthropicVersion = "2023-06-01"
            }
            req.Header.Set("anthropic-version", anthropicVersion)
        case APITypeZhipu:
            token := getZhipuToken(apiKey)
            req.Header.Set("Authorization", token)
        case APITypeAli:
            req.Header.Set("Authorization", "Bearer "+apiKey)
            if textRequest.Stream {
                req.Header.Set("X-DashScope-SSE", "enable")
            }
        case APITypeTencent:
            req.Header.Set("Authorization", apiKey)
        case APITypeGemini:
            req.Header.Set("Content-Type", "application/json")
        default:
            req.Header.Set("Authorization", "Bearer "+apiKey)
        }
        if apiType != APITypeGemini {
            // set the common headers
            req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
            req.Header.Set("Accept", c.Request.Header.Get("Accept"))
            if isStream && c.Request.Header.Get("Accept") == "" {
                req.Header.Set("Accept", "text/event-stream")
            }
        }
        //req.Header.Set("Connection", c.Request.Header.Get("Connection"))
        resp, err = httpClient.Do(req)
        if err != nil {
            return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
        }
        err = req.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
        }
        err = c.Request.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
        }
        isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
        if resp.StatusCode != http.StatusOK {
            if preConsumedQuota != 0 {
                go func(ctx context.Context) {
                    // return pre-consumed quota
                    err := model.PostConsumeTokenQuota(tokenId, userQuota, -preConsumedQuota, 0, false)
                    if err != nil {
                        common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
                    }
                }(c.Request.Context())
            }
            return relayErrorHandler(resp)
        }
    }
    var textResponse TextResponse
    tokenName := c.GetString("token_name")
    defer func(ctx context.Context) {
        // c.Writer.Flush()
        go func() {
            useTimeSeconds := time.Now().Unix() - startTime.Unix()
            promptTokens = textResponse.Usage.PromptTokens
            completionTokens = textResponse.Usage.CompletionTokens
            quota := 0
            if modelPrice == -1 {
                completionRatio := common.GetCompletionRatio(textRequest.Model)
                quota = promptTokens + int(float64(completionTokens)*completionRatio)
                quota = int(float64(quota) * ratio)
                if ratio != 0 && quota <= 0 {
                    quota = 1
                }
            } else {
                quota = int(modelPrice * common.QuotaPerUnit * groupRatio)
            }
            totalTokens := promptTokens + completionTokens
            var logContent string
            if modelPrice == -1 {
                logContent = fmt.Sprintf("model ratio %.2f, group ratio %.2f", modelRatio, groupRatio)
            } else {
                logContent = fmt.Sprintf("model price %.2f, group ratio %.2f", modelPrice, groupRatio)
            }
            // record the consume log even if the quota is 0
            if totalTokens == 0 {
                // in this case, some error must have happened
                // we cannot just return, because we may have to return the pre-consumed quota
                quota = 0
                logContent += " (please contact the administrator if you have any questions)"
                common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, tokenId %d, model %s, pre-consumed quota %d", userId, channelId, tokenId, textRequest.Model, preConsumedQuota))
            } else {
                quotaDelta := quota - preConsumedQuota
                err := model.PostConsumeTokenQuota(tokenId, userQuota, quotaDelta, preConsumedQuota, true)
                if err != nil {
                    common.LogError(ctx, "error consuming token remain quota: "+err.Error())
                }
                err = model.CacheUpdateUserQuota(userId)
                if err != nil {
                    common.LogError(ctx, "error updating user quota cache: "+err.Error())
                }
                model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
                model.UpdateChannelUsedQuota(channelId, quota)
            }
            logModel := textRequest.Model
            if strings.HasPrefix(logModel, "gpt-4-gizmo") {
                logModel = "gpt-4-gizmo-*"
                logContent += fmt.Sprintf(", model %s", textRequest.Model)
            }
            model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), isStream)
        }()
    }(c.Request.Context())
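    // Hand the upstream response to the handler for the selected API type;
    // streaming and non-streaming paths are handled separately per provider.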
    switch apiType {
    case APITypeOpenAI:
        if isStream {
            err, responseText := openaiStreamHandler(c, resp, relayMode)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := openaiHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeClaude:
        if isStream {
            err, responseText := claudeStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeBaidu:
        if isStream {
            err, usage := baiduStreamHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        } else {
            var err *OpenAIErrorWithStatusCode
            var usage *Usage
            switch relayMode {
            case RelayModeEmbeddings:
                err, usage = baiduEmbeddingHandler(c, resp)
            default:
                err, usage = baiduHandler(c, resp)
            }
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypePaLM:
        if textRequest.Stream { // PaLM2 API does not support stream
            err, responseText := palmStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeGemini:
        if textRequest.Stream {
            err, responseText := geminiChatStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := geminiChatHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeZhipu:
        if isStream {
            err, usage := zhipuStreamHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            // zhipu's API does not return prompt tokens & completion tokens
            textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
            return nil
        } else {
            err, usage := zhipuHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            // zhipu's API does not return prompt tokens & completion tokens
            textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
            return nil
        }
    case APITypeAli:
        if isStream {
            err, usage := aliStreamHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        } else {
            var err *OpenAIErrorWithStatusCode
            var usage *Usage
            switch relayMode {
            case RelayModeEmbeddings:
                err, usage = aliEmbeddingHandler(c, resp)
            default:
                err, usage = aliHandler(c, resp)
            }
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeXunfei:
        auth := c.Request.Header.Get("Authorization")
        auth = strings.TrimPrefix(auth, "Bearer ")
        splits := strings.Split(auth, "|")
        if len(splits) != 3 {
            return errorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
        }
        var err *OpenAIErrorWithStatusCode
        var usage *Usage
        if isStream {
            err, usage = xunfeiStreamHandler(c, textRequest, splits[0], splits[1], splits[2])
        } else {
            err, usage = xunfeiHandler(c, textRequest, splits[0], splits[1], splits[2])
        }
        if err != nil {
            return err
        }
        if usage != nil {
            textResponse.Usage = *usage
        }
        return nil
    case APITypeAIProxyLibrary:
        if isStream {
            err, usage := aiProxyLibraryStreamHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        } else {
            err, usage := aiProxyLibraryHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    case APITypeTencent:
        if isStream {
            err, responseText := tencentStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := tencentHandler(c, resp)
            if err != nil {
                return err
            }
            if usage != nil {
                textResponse.Usage = *usage
            }
            return nil
        }
    default:
        return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
    }
}