relay-text.go

package controller

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"one-api/common"
	"one-api/model"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
)
const (
	APITypeOpenAI = iota
	APITypeClaude
	APITypePaLM
	APITypeBaidu
	APITypeZhipu
	APITypeAli
	APITypeXunfei
	APITypeAIProxyLibrary
	APITypeTencent
	APITypeGemini
)
var httpClient *http.Client
var impatientHTTPClient *http.Client

func init() {
	if common.RelayTimeout == 0 {
		httpClient = &http.Client{}
	} else {
		httpClient = &http.Client{
			Timeout: time.Duration(common.RelayTimeout) * time.Second,
		}
	}
	impatientHTTPClient = &http.Client{
		Timeout: 5 * time.Second,
	}
}
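
// relayTextHelper relays a text request (chat, completion, embedding,
// moderation or edit) to the upstream channel bound to this request by the
// routing middleware. It validates the payload, applies model mapping, builds
// the channel-specific URL and request body, pre-consumes quota, forwards the
// request, and dispatches the response to the matching handler.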
func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
	channelType := c.GetInt("channel")
	channelId := c.GetInt("channel_id")
	tokenId := c.GetInt("token_id")
	userId := c.GetInt("id")
	group := c.GetString("group")
	startTime := time.Now()
	var textRequest GeneralOpenAIRequest
	err := common.UnmarshalBodyReusable(c, &textRequest)
	if err != nil {
		return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
	}
	if relayMode == RelayModeModerations && textRequest.Model == "" {
		textRequest.Model = "text-moderation-latest"
	}
	if relayMode == RelayModeEmbeddings && textRequest.Model == "" {
		textRequest.Model = c.Param("model")
	}
	// request validation
	if textRequest.Model == "" {
		return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)
	}
	switch relayMode {
	case RelayModeCompletions:
		if textRequest.Prompt == "" {
			return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeChatCompletions:
		if textRequest.Messages == nil || len(textRequest.Messages) == 0 {
			return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeEmbeddings:
	case RelayModeModerations:
		if textRequest.Input == "" {
			return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeEdits:
		if textRequest.Instruction == "" {
			return errorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
		}
	}
	// map model name
	modelMapping := c.GetString("model_mapping")
	isModelMapped := false
	if modelMapping != "" && modelMapping != "{}" {
		modelMap := make(map[string]string)
		err := json.Unmarshal([]byte(modelMapping), &modelMap)
		if err != nil {
			return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
		}
		if modelMap[textRequest.Model] != "" {
			textRequest.Model = modelMap[textRequest.Model]
			isModelMapped = true
		}
	}
	apiType := APITypeOpenAI
	switch channelType {
	case common.ChannelTypeAnthropic:
		apiType = APITypeClaude
	case common.ChannelTypeBaidu:
		apiType = APITypeBaidu
	case common.ChannelTypePaLM:
		apiType = APITypePaLM
	case common.ChannelTypeZhipu:
		apiType = APITypeZhipu
	case common.ChannelTypeAli:
		apiType = APITypeAli
	case common.ChannelTypeXunfei:
		apiType = APITypeXunfei
	case common.ChannelTypeAIProxyLibrary:
		apiType = APITypeAIProxyLibrary
	case common.ChannelTypeTencent:
		apiType = APITypeTencent
	case common.ChannelTypeGemini:
		apiType = APITypeGemini
	}
	baseURL := common.ChannelBaseURLs[channelType]
	requestURL := c.Request.URL.String()
	if c.GetString("base_url") != "" {
		baseURL = c.GetString("base_url")
	}
	fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
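	// Build the final upstream request URL for the selected API type.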
	switch apiType {
	case APITypeOpenAI:
		if channelType == common.ChannelTypeAzure {
			// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
			query := c.Request.URL.Query()
			apiVersion := query.Get("api-version")
			if apiVersion == "" {
				apiVersion = c.GetString("api_version")
			}
			requestURL := strings.Split(requestURL, "?")[0]
			requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
			baseURL = c.GetString("base_url")
			task := strings.TrimPrefix(requestURL, "/v1/")
			model_ := textRequest.Model
			model_ = strings.Replace(model_, ".", "", -1)
			// https://github.com/songquanpeng/one-api/issues/67
			model_ = strings.TrimSuffix(model_, "-0301")
			model_ = strings.TrimSuffix(model_, "-0314")
			model_ = strings.TrimSuffix(model_, "-0613")
			fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
		}
	case APITypeClaude:
		fullRequestURL = "https://api.anthropic.com/v1/complete"
		if baseURL != "" {
			fullRequestURL = fmt.Sprintf("%s/v1/complete", baseURL)
		}
	case APITypeBaidu:
		switch textRequest.Model {
		case "ERNIE-Bot":
			fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
		case "ERNIE-Bot-turbo":
			fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
		case "ERNIE-Bot-4":
			fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
		case "BLOOMZ-7B":
			fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1"
		case "Embedding-V1":
			fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"
		}
		apiKey := c.Request.Header.Get("Authorization")
		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
		var err error
		if apiKey, err = getBaiduAccessToken(apiKey); err != nil {
			return errorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError)
		}
		fullRequestURL += "?access_token=" + apiKey
	case APITypePaLM:
		fullRequestURL = "https://generativelanguage.googleapis.com/v1beta2/models/chat-bison-001:generateMessage"
		if baseURL != "" {
			fullRequestURL = fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", baseURL)
		}
		apiKey := c.Request.Header.Get("Authorization")
		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
		fullRequestURL += "?key=" + apiKey
	case APITypeGemini:
		requestBaseURL := "https://generativelanguage.googleapis.com"
		if baseURL != "" {
			requestBaseURL = baseURL
		}
		version := "v1beta"
		if c.GetString("api_version") != "" {
			version = c.GetString("api_version")
		}
		action := "generateContent"
		if textRequest.Stream {
			action = "streamGenerateContent"
		}
		fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
		apiKey := c.Request.Header.Get("Authorization")
		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
		fullRequestURL += "?key=" + apiKey
		//log.Println(fullRequestURL)
	case APITypeZhipu:
		method := "invoke"
		if textRequest.Stream {
			method = "sse-invoke"
		}
		fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
	case APITypeAli:
		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
		if relayMode == RelayModeEmbeddings {
			fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
		}
	case APITypeTencent:
		fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
	case APITypeAIProxyLibrary:
		fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
	}
	var promptTokens int
	var completionTokens int
	switch relayMode {
	case RelayModeChatCompletions:
		promptTokens, err = countTokenMessages(textRequest.Messages, textRequest.Model)
		if err != nil {
			return errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
		}
	case RelayModeCompletions:
		promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
	case RelayModeModerations:
		promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
	}
	preConsumedTokens := common.PreConsumedQuota
	if textRequest.MaxTokens != 0 {
		preConsumedTokens = promptTokens + textRequest.MaxTokens
	}
	modelRatio := common.GetModelRatio(textRequest.Model)
	groupRatio := common.GetGroupRatio(group)
	ratio := modelRatio * groupRatio
	preConsumedQuota := int(float64(preConsumedTokens) * ratio)
	userQuota, err := model.CacheGetUserQuota(userId)
	if err != nil {
		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota < 0 || userQuota-preConsumedQuota < 0 {
		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
	}
	err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
	if err != nil {
		return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota > 100*preConsumedQuota {
		// in this case, we do not pre-consume quota
		// because the user has enough quota
		preConsumedQuota = 0
		common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", userId, userQuota))
	}
	if preConsumedQuota > 0 {
		userQuota, err = model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
		if err != nil {
			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
	}
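	// Build the upstream request body: reuse the client body as-is when possible,
	// otherwise re-marshal the (model-mapped or channel-converted) request.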
	var requestBody io.Reader
	if isModelMapped {
		jsonStr, err := json.Marshal(textRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	} else {
		requestBody = c.Request.Body
	}
	switch apiType {
	case APITypeClaude:
		claudeRequest := requestOpenAI2Claude(textRequest)
		jsonStr, err := json.Marshal(claudeRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeBaidu:
		var jsonData []byte
		var err error
		switch relayMode {
		case RelayModeEmbeddings:
			baiduEmbeddingRequest := embeddingRequestOpenAI2Baidu(textRequest)
			jsonData, err = json.Marshal(baiduEmbeddingRequest)
		default:
			baiduRequest := requestOpenAI2Baidu(textRequest)
			jsonData, err = json.Marshal(baiduRequest)
		}
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonData)
	case APITypePaLM:
		palmRequest := requestOpenAI2PaLM(textRequest)
		jsonStr, err := json.Marshal(palmRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeGemini:
		geminiChatRequest := requestOpenAI2Gemini(textRequest)
		jsonStr, err := json.Marshal(geminiChatRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeZhipu:
		zhipuRequest := requestOpenAI2Zhipu(textRequest)
		jsonStr, err := json.Marshal(zhipuRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeAli:
		var jsonStr []byte
		var err error
		switch relayMode {
		case RelayModeEmbeddings:
			aliEmbeddingRequest := embeddingRequestOpenAI2Ali(textRequest)
			jsonStr, err = json.Marshal(aliEmbeddingRequest)
		default:
			aliRequest := requestOpenAI2Ali(textRequest)
			jsonStr, err = json.Marshal(aliRequest)
		}
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeTencent:
		apiKey := c.Request.Header.Get("Authorization")
		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
		appId, secretId, secretKey, err := parseTencentConfig(apiKey)
		if err != nil {
			return errorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError)
		}
		tencentRequest := requestOpenAI2Tencent(textRequest)
		tencentRequest.AppId = appId
		tencentRequest.SecretId = secretId
		jsonStr, err := json.Marshal(tencentRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		sign := getTencentSign(*tencentRequest, secretKey)
		c.Request.Header.Set("Authorization", sign)
		requestBody = bytes.NewBuffer(jsonStr)
	case APITypeAIProxyLibrary:
		aiProxyLibraryRequest := requestOpenAI2AIProxyLibrary(textRequest)
		aiProxyLibraryRequest.LibraryId = c.GetString("library_id")
		jsonStr, err := json.Marshal(aiProxyLibraryRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	}
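	// Forward the request over HTTP. Xunfei is excluded here because it is
	// relayed over WebSocket inside its handler branch below.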
	var req *http.Request
	var resp *http.Response
	isStream := textRequest.Stream
	if apiType != APITypeXunfei { // Xunfei uses WebSocket instead of HTTP
		req, err = http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
		if err != nil {
			return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		}
		// Set GetBody so that a new io.ReadCloser returning the same data as the
		// original request body can be obtained (e.g. if the request is retried).
		req.GetBody = func() (io.ReadCloser, error) {
			return io.NopCloser(requestBody), nil
		}
		apiKey := c.Request.Header.Get("Authorization")
		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
		switch apiType {
		case APITypeOpenAI:
			if channelType == common.ChannelTypeAzure {
				req.Header.Set("api-key", apiKey)
			} else {
				req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
				if c.Request.Header.Get("OpenAI-Organization") != "" {
					req.Header.Set("OpenAI-Organization", c.Request.Header.Get("OpenAI-Organization"))
				}
				if channelType == common.ChannelTypeOpenRouter {
					req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
					req.Header.Set("X-Title", "One API")
				}
			}
		case APITypeClaude:
			req.Header.Set("x-api-key", apiKey)
			anthropicVersion := c.Request.Header.Get("anthropic-version")
			if anthropicVersion == "" {
				anthropicVersion = "2023-06-01"
			}
			req.Header.Set("anthropic-version", anthropicVersion)
		case APITypeZhipu:
			token := getZhipuToken(apiKey)
			req.Header.Set("Authorization", token)
		case APITypeAli:
			req.Header.Set("Authorization", "Bearer "+apiKey)
			if textRequest.Stream {
				req.Header.Set("X-DashScope-SSE", "enable")
			}
		case APITypeTencent:
			req.Header.Set("Authorization", apiKey)
		default:
			req.Header.Set("Authorization", "Bearer "+apiKey)
		}
		req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
		req.Header.Set("Accept", c.Request.Header.Get("Accept"))
		if isStream && c.Request.Header.Get("Accept") == "" {
			req.Header.Set("Accept", "text/event-stream")
		}
		//req.Header.Set("Connection", c.Request.Header.Get("Connection"))
		resp, err = httpClient.Do(req)
		if err != nil {
			return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
		}
		err = req.Body.Close()
		if err != nil {
			return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
		}
		err = c.Request.Body.Close()
		if err != nil {
			return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
		}
		isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
		if resp.StatusCode != http.StatusOK {
			if preConsumedQuota != 0 {
				go func(ctx context.Context) {
					// return pre-consumed quota
					err := model.PostConsumeTokenQuota(tokenId, userQuota, -preConsumedQuota, 0, false)
					if err != nil {
						common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
					}
				}(c.Request.Context())
			}
			return relayErrorHandler(resp)
		}
	}
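	// Once the handler below finishes, settle quota asynchronously based on the
	// usage recorded in textResponse.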
	var textResponse TextResponse
	tokenName := c.GetString("token_name")
	defer func(ctx context.Context) {
		// c.Writer.Flush()
		go func() {
			quota := 0
			completionRatio := common.GetCompletionRatio(textRequest.Model)
			promptTokens = textResponse.Usage.PromptTokens
			completionTokens = textResponse.Usage.CompletionTokens
			quota = promptTokens + int(float64(completionTokens)*completionRatio)
			quota = int(float64(quota) * ratio)
			if ratio != 0 && quota <= 0 {
				quota = 1
			}
			totalTokens := promptTokens + completionTokens
			if totalTokens == 0 {
				// in this case, some error must have happened
				// we cannot just return, because we may have to return the pre-consumed quota
				quota = 0
			}
			quotaDelta := quota - preConsumedQuota
			err := model.PostConsumeTokenQuota(tokenId, userQuota, quotaDelta, preConsumedQuota, true)
			if err != nil {
				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
			}
			err = model.CacheUpdateUserQuota(userId)
			if err != nil {
				common.LogError(ctx, "error update user quota cache: "+err.Error())
			}
			// record the consume log even if quota is 0
			useTimeSeconds := time.Now().Unix() - startTime.Unix()
			logContent := fmt.Sprintf("model ratio %.2f, group ratio %.2f, elapsed %d s", modelRatio, groupRatio, useTimeSeconds)
			model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
			model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
			model.UpdateChannelUsedQuota(channelId, quota)
		}()
	}(c.Request.Context())
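	// Dispatch the upstream response to the channel-specific handler and collect usage.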
	switch apiType {
	case APITypeOpenAI:
		if isStream {
			err, responseText := openaiStreamHandler(c, resp, relayMode)
			if err != nil {
				return err
			}
			textResponse.Usage.PromptTokens = promptTokens
			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
			return nil
		} else {
			err, usage := openaiHandler(c, resp, promptTokens, textRequest.Model)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeClaude:
		if isStream {
			err, responseText := claudeStreamHandler(c, resp)
			if err != nil {
				return err
			}
			textResponse.Usage.PromptTokens = promptTokens
			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
			return nil
		} else {
			err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeBaidu:
		if isStream {
			err, usage := baiduStreamHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		} else {
			var err *OpenAIErrorWithStatusCode
			var usage *Usage
			switch relayMode {
			case RelayModeEmbeddings:
				err, usage = baiduEmbeddingHandler(c, resp)
			default:
				err, usage = baiduHandler(c, resp)
			}
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypePaLM:
		if textRequest.Stream { // PaLM2 API does not support stream
			err, responseText := palmStreamHandler(c, resp)
			if err != nil {
				return err
			}
			textResponse.Usage.PromptTokens = promptTokens
			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
			return nil
		} else {
			err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeGemini:
		if textRequest.Stream {
			err, responseText := geminiChatStreamHandler(c, resp)
			if err != nil {
				return err
			}
			textResponse.Usage.PromptTokens = promptTokens
			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
			return nil
		} else {
			err, usage := geminiChatHandler(c, resp, promptTokens, textRequest.Model)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeZhipu:
		if isStream {
			err, usage := zhipuStreamHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			// Zhipu's API does not return prompt tokens & completion tokens
			textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
			return nil
		} else {
			err, usage := zhipuHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			// Zhipu's API does not return prompt tokens & completion tokens
			textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
			return nil
		}
	case APITypeAli:
		if isStream {
			err, usage := aliStreamHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		} else {
			var err *OpenAIErrorWithStatusCode
			var usage *Usage
			switch relayMode {
			case RelayModeEmbeddings:
				err, usage = aliEmbeddingHandler(c, resp)
			default:
				err, usage = aliHandler(c, resp)
			}
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeXunfei:
		auth := c.Request.Header.Get("Authorization")
		auth = strings.TrimPrefix(auth, "Bearer ")
		splits := strings.Split(auth, "|")
		if len(splits) != 3 {
			return errorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
		}
		var err *OpenAIErrorWithStatusCode
		var usage *Usage
		if isStream {
			err, usage = xunfeiStreamHandler(c, textRequest, splits[0], splits[1], splits[2])
		} else {
			err, usage = xunfeiHandler(c, textRequest, splits[0], splits[1], splits[2])
		}
		if err != nil {
			return err
		}
		if usage != nil {
			textResponse.Usage = *usage
		}
		return nil
	case APITypeAIProxyLibrary:
		if isStream {
			err, usage := aiProxyLibraryStreamHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		} else {
			err, usage := aiProxyLibraryHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	case APITypeTencent:
		if isStream {
			err, responseText := tencentStreamHandler(c, resp)
			if err != nil {
				return err
			}
			textResponse.Usage.PromptTokens = promptTokens
			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
			return nil
		} else {
			err, usage := tencentHandler(c, resp)
			if err != nil {
				return err
			}
			if usage != nil {
				textResponse.Usage = *usage
			}
			return nil
		}
	default:
		return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
	}
}