relay-text.go

package controller

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"one-api/common"
	"one-api/model"
	"strings"

	"github.com/gin-gonic/gin"
)

const (
	APITypeOpenAI = iota
	APITypeClaude
	APITypePaLM
)
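
// relayTextHelper forwards a text-style request (chat completion, completion,
// embedding, moderation, or edit) to the configured upstream channel and
// settles the caller's quota based on the tokens actually consumed.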
func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
	channelType := c.GetInt("channel")
	tokenId := c.GetInt("token_id")
	userId := c.GetInt("id")
	consumeQuota := c.GetBool("consume_quota")
	group := c.GetString("group")
	var textRequest GeneralOpenAIRequest
	if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM {
		err := common.UnmarshalBodyReusable(c, &textRequest)
		if err != nil {
			return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
		}
	}
	if relayMode == RelayModeModerations && textRequest.Model == "" {
		textRequest.Model = "text-moderation-latest"
	}
	if relayMode == RelayModeEmbeddings && textRequest.Model == "" {
		textRequest.Model = c.Param("model")
	}
	// request validation
	if textRequest.Model == "" {
		return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)
	}
	switch relayMode {
	case RelayModeCompletions:
		if textRequest.Prompt == "" {
			return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeChatCompletions:
		if textRequest.Messages == nil || len(textRequest.Messages) == 0 {
			return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeEmbeddings:
	case RelayModeModerations:
		if textRequest.Input == "" {
			return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
		}
	case RelayModeEdits:
		if textRequest.Instruction == "" {
			return errorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
		}
	}
	// map model name
	modelMapping := c.GetString("model_mapping")
	isModelMapped := false
	if modelMapping != "" {
		modelMap := make(map[string]string)
		err := json.Unmarshal([]byte(modelMapping), &modelMap)
		if err != nil {
			return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
		}
		if modelMap[textRequest.Model] != "" {
			textRequest.Model = modelMap[textRequest.Model]
			isModelMapped = true
		}
	}
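	// decide which upstream API to talk to and build the full request URL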
	apiType := APITypeOpenAI
	if strings.HasPrefix(textRequest.Model, "claude") {
		apiType = APITypeClaude
	}
	baseURL := common.ChannelBaseURLs[channelType]
	requestURL := c.Request.URL.String()
	if c.GetString("base_url") != "" {
		baseURL = c.GetString("base_url")
	}
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
	switch apiType {
	case APITypeOpenAI:
		if channelType == common.ChannelTypeAzure {
			// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
			query := c.Request.URL.Query()
			apiVersion := query.Get("api-version")
			if apiVersion == "" {
				apiVersion = c.GetString("api_version")
			}
			requestURL := strings.Split(requestURL, "?")[0]
			requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
			baseURL = c.GetString("base_url")
			task := strings.TrimPrefix(requestURL, "/v1/")
			model_ := textRequest.Model
			model_ = strings.Replace(model_, ".", "", -1)
			// https://github.com/songquanpeng/one-api/issues/67
			model_ = strings.TrimSuffix(model_, "-0301")
			model_ = strings.TrimSuffix(model_, "-0314")
			model_ = strings.TrimSuffix(model_, "-0613")
			fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
		}
	case APITypeClaude:
		fullRequestURL = "https://api.anthropic.com/v1/complete"
		if baseURL != "" {
			fullRequestURL = fmt.Sprintf("%s/v1/complete", baseURL)
		}
	}
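	// estimate prompt tokens; they feed both the quota pre-consumption below and the final settlement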
	var promptTokens int
	var completionTokens int
	switch relayMode {
	case RelayModeChatCompletions:
		promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
	case RelayModeCompletions:
		promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
	case RelayModeModerations:
		promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
	}
	preConsumedTokens := common.PreConsumedQuota
	if textRequest.MaxTokens != 0 {
		preConsumedTokens = promptTokens + textRequest.MaxTokens
	}
	modelRatio := common.GetModelRatio(textRequest.Model)
	groupRatio := common.GetGroupRatio(group)
	ratio := modelRatio * groupRatio
	preConsumedQuota := int(float64(preConsumedTokens) * ratio)
	userQuota, err := model.CacheGetUserQuota(userId)
	if err != nil {
		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota > 10*preConsumedQuota {
		// in this case, we do not pre-consume quota
		// because the user has enough quota
		preConsumedQuota = 0
	}
	if consumeQuota && preConsumedQuota > 0 {
		err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
		if err != nil {
			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
	}
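	// build the body forwarded upstream: re-marshal only when the model name was remapped,
	// or convert the request entirely for a non-OpenAI API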
	var requestBody io.Reader
	if isModelMapped {
		jsonStr, err := json.Marshal(textRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	} else {
		requestBody = c.Request.Body
	}
	switch apiType {
	case APITypeClaude:
		claudeRequest := requestOpenAI2Claude(textRequest)
		jsonStr, err := json.Marshal(claudeRequest)
		if err != nil {
			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonStr)
	}
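	// forward the request upstream with channel-appropriate authentication headers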
	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
	if err != nil {
		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}
	apiKey := c.Request.Header.Get("Authorization")
	apiKey = strings.TrimPrefix(apiKey, "Bearer ")
	switch apiType {
	case APITypeOpenAI:
		if channelType == common.ChannelTypeAzure {
			req.Header.Set("api-key", apiKey)
		} else {
			req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
		}
	case APITypeClaude:
		req.Header.Set("x-api-key", apiKey)
		anthropicVersion := c.Request.Header.Get("anthropic-version")
		if anthropicVersion == "" {
			anthropicVersion = "2023-06-01"
		}
		req.Header.Set("anthropic-version", anthropicVersion)
	}
	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
	//req.Header.Set("Connection", c.Request.Header.Get("Connection"))
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}
	err = req.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
	}
	err = c.Request.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
	}
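	// quota settlement is deferred so it runs after the response has been relayed:
	// it charges the actual usage and collects or refunds the difference against the pre-consumed quota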
	var textResponse TextResponse
	isStream := strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
	var streamResponseText string
	defer func() {
		if consumeQuota {
			quota := 0
			completionRatio := 1.0
			if strings.HasPrefix(textRequest.Model, "gpt-3.5") {
				completionRatio = 1.333333
			}
			if strings.HasPrefix(textRequest.Model, "gpt-4") {
				completionRatio = 2
			}
			if isStream {
				completionTokens = countTokenText(streamResponseText, textRequest.Model)
			} else {
				promptTokens = textResponse.Usage.PromptTokens
				completionTokens = textResponse.Usage.CompletionTokens
			}
			quota = promptTokens + int(float64(completionTokens)*completionRatio)
			quota = int(float64(quota) * ratio)
			if ratio != 0 && quota <= 0 {
				quota = 1
			}
			totalTokens := promptTokens + completionTokens
			if totalTokens == 0 {
				// in this case, some error must have happened
				// we cannot just return, because we may have to refund the pre-consumed quota
				quota = 0
			}
			quotaDelta := quota - preConsumedQuota
			err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
			if err != nil {
				common.SysError("error consuming token remaining quota: " + err.Error())
			}
			err = model.CacheUpdateUserQuota(userId)
			if err != nil {
				common.SysError("error updating user quota cache: " + err.Error())
			}
			if quota != 0 {
				tokenName := c.GetString("token_name")
				logContent := fmt.Sprintf("model ratio %.2f, group ratio %.2f", modelRatio, groupRatio)
				model.RecordConsumeLog(userId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
				channelId := c.GetInt("channel_id")
				model.UpdateChannelUsedQuota(channelId, quota)
			}
		}
	}()
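	// relay the upstream response back to the client according to the API type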
	switch apiType {
	case APITypeOpenAI:
		if isStream {
			err, responseText := openaiStreamHandler(c, resp, relayMode)
			if err != nil {
				return err
			}
			streamResponseText = responseText
			return nil
		} else {
			err, usage := openaiHandler(c, resp, consumeQuota)
			if err != nil {
				return err
			}
			textResponse.Usage = *usage
			return nil
		}
	case APITypeClaude:
		if isStream {
			err, responseText := claudeStreamHandler(c, resp)
			if err != nil {
				return err
			}
			streamResponseText = responseText
			return nil
		} else {
			err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
			if err != nil {
				return err
			}
			textResponse.Usage = *usage
			return nil
		}
	default:
		return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
	}
}