relay-text.go

package relay

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math"
    "net/http"
    "strings"
    "time"

    "github.com/gin-gonic/gin"

    "one-api/common"
    "one-api/constant"
    "one-api/dto"
    "one-api/model"
    relaycommon "one-api/relay/common"
    relayconstant "one-api/relay/constant"
    "one-api/service"
)
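
// getAndValidateTextRequest parses the request body into a GeneralOpenAIRequest,
// applies per-mode model defaults, and checks the fields each relay mode requires.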
func getAndValidateTextRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo) (*dto.GeneralOpenAIRequest, error) {
    textRequest := &dto.GeneralOpenAIRequest{}
    err := common.UnmarshalBodyReusable(c, textRequest)
    if err != nil {
        return nil, err
    }
    if relayInfo.RelayMode == relayconstant.RelayModeModerations && textRequest.Model == "" {
        textRequest.Model = "text-moderation-latest"
    }
    if relayInfo.RelayMode == relayconstant.RelayModeEmbeddings && textRequest.Model == "" {
        textRequest.Model = c.Param("model")
    }
    if textRequest.MaxTokens < 0 || textRequest.MaxTokens > math.MaxInt32/2 {
        return nil, errors.New("max_tokens is invalid")
    }
    if textRequest.Model == "" {
        return nil, errors.New("model is required")
    }
    switch relayInfo.RelayMode {
    case relayconstant.RelayModeCompletions:
        if textRequest.Prompt == "" {
            return nil, errors.New("field prompt is required")
        }
    case relayconstant.RelayModeChatCompletions:
        if len(textRequest.Messages) == 0 {
            return nil, errors.New("field messages is required")
        }
    case relayconstant.RelayModeEmbeddings:
    case relayconstant.RelayModeModerations:
        if textRequest.Input == "" {
            return nil, errors.New("field input is required")
        }
    case relayconstant.RelayModeEdits:
        if textRequest.Instruction == "" {
            return nil, errors.New("field instruction is required")
        }
    }
    relayInfo.IsStream = textRequest.Stream
    return textRequest, nil
}
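
// TextHelper drives a complete text relay: it validates the request, applies the
// channel's model mapping, pre-consumes quota, forwards the request through the
// matching adaptor, and settles quota once the response has been handled.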
func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
    relayInfo := relaycommon.GenRelayInfo(c)

    // get & validate the text request
    textRequest, err := getAndValidateTextRequest(c, relayInfo)
    if err != nil {
        common.LogError(c, fmt.Sprintf("getAndValidateTextRequest failed: %s", err.Error()))
        return service.OpenAIErrorWrapper(err, "invalid_text_request", http.StatusBadRequest)
    }

    // map the requested model name to the channel's upstream model name
    modelMapping := c.GetString("model_mapping")
    isModelMapped := false
    if modelMapping != "" && modelMapping != "{}" {
        modelMap := make(map[string]string)
        err := json.Unmarshal([]byte(modelMapping), &modelMap)
        if err != nil {
            return service.OpenAIErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
        }
        if modelMap[textRequest.Model] != "" {
            textRequest.Model = modelMap[textRequest.Model]
            isModelMapped = true
        }
    }
    relayInfo.UpstreamModelName = textRequest.Model
    modelPrice := common.GetModelPrice(textRequest.Model, false)
    groupRatio := common.GetGroupRatio(relayInfo.Group)

    var preConsumedQuota int
    var ratio float64
    var modelRatio float64

    //err := service.SensitiveWordsCheck(textRequest)
    promptTokens, err := getPromptTokens(textRequest, relayInfo)
    if err != nil {
        // counting prompt tokens failed
        return service.OpenAIErrorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
    }
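
    // Estimate the quota to pre-consume. With ratio-based pricing (modelPrice == -1),
    // the estimate is prompt tokens plus max_tokens (or a configured default), scaled
    // by the model and group ratios; with fixed per-call pricing it is the flat price.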
    if modelPrice == -1 {
        preConsumedTokens := common.PreConsumedQuota
        if textRequest.MaxTokens != 0 {
            preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
        }
        modelRatio = common.GetModelRatio(textRequest.Model)
        ratio = modelRatio * groupRatio
        preConsumedQuota = int(float64(preConsumedTokens) * ratio)
    } else {
        preConsumedQuota = int(modelPrice * common.QuotaPerUnit * groupRatio)
    }

    // pre-consume quota
    preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, preConsumedQuota, relayInfo)
    if openaiErr != nil {
        return openaiErr
    }

    adaptor := GetAdaptor(relayInfo.ApiType)
    if adaptor == nil {
        return service.OpenAIErrorWrapper(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
    }
    adaptor.Init(relayInfo, *textRequest)
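
    // Build the upstream request body. OpenAI-compatible requests are forwarded
    // verbatim unless the model name was remapped; other API types are converted
    // by the adaptor and re-marshalled to JSON.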
    var requestBody io.Reader
    if relayInfo.ApiType == relayconstant.APITypeOpenAI {
        if isModelMapped {
            jsonStr, err := json.Marshal(textRequest)
            if err != nil {
                return service.OpenAIErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            }
            requestBody = bytes.NewBuffer(jsonStr)
        } else {
            requestBody = c.Request.Body
        }
    } else {
        convertedRequest, err := adaptor.ConvertRequest(c, relayInfo.RelayMode, textRequest)
        if err != nil {
            return service.OpenAIErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
        }
        jsonData, err := json.Marshal(convertedRequest)
        if err != nil {
            return service.OpenAIErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonData)
    }
    resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
    if err != nil {
        return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
    }
    relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")

    if resp.StatusCode != http.StatusOK {
        returnPreConsumedQuota(c, relayInfo.TokenId, userQuota, preConsumedQuota)
        return service.RelayErrorHandler(resp)
    }

    usage, openaiErr, sensitiveResp := adaptor.DoResponse(c, resp, relayInfo)
    if openaiErr != nil {
        if sensitiveResp == nil {
            // no sensitive-word hit: refund the pre-consumed quota and surface the error
            returnPreConsumedQuota(c, relayInfo.TokenId, userQuota, preConsumedQuota)
            return openaiErr
        }
        // sensitive words were detected: keep consuming quota instead of refunding it
        postConsumeQuota(c, relayInfo, *textRequest, usage, ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, sensitiveResp)
        if constant.StopOnSensitiveEnabled {
            // configured to surface the error to the client
            return openaiErr
        }
        return nil
    }
    postConsumeQuota(c, relayInfo, *textRequest, usage, ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, nil)
    return nil
}
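
// getPromptTokens counts the prompt tokens for the current relay mode and
// records the result on the RelayInfo.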
func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (int, error) {
    var promptTokens int
    var err error
    checkSensitive := constant.ShouldCheckPromptSensitive()
    switch info.RelayMode {
    case relayconstant.RelayModeChatCompletions:
        promptTokens, err = service.CountTokenMessages(textRequest.Messages, textRequest.Model, checkSensitive)
    case relayconstant.RelayModeCompletions:
        promptTokens, err = service.CountTokenInput(textRequest.Prompt, textRequest.Model, checkSensitive)
    case relayconstant.RelayModeModerations, relayconstant.RelayModeEmbeddings:
        promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model, checkSensitive)
    default:
        err = errors.New("unknown relay mode")
        promptTokens = 0
    }
    info.PromptTokens = promptTokens
    return promptTokens, err
}
// preConsumeQuota pre-deducts quota for the request and returns the (possibly
// zeroed) pre-consumed amount together with the user's remaining quota.
func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommon.RelayInfo) (int, int, *dto.OpenAIErrorWithStatusCode) {
    userQuota, err := model.CacheGetUserQuota(relayInfo.UserId)
    if err != nil {
        return 0, 0, service.OpenAIErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota <= 0 || userQuota-preConsumedQuota < 0 {
        return 0, 0, service.OpenAIErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
    }
    err = model.CacheDecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
    if err != nil {
        return 0, 0, service.OpenAIErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota > 100*preConsumedQuota {
        // the user's quota is ample; check whether the token's quota is as well
        if !relayInfo.TokenUnlimited {
            // limited token: skip pre-consumption only if the token quota is also ample
            tokenQuota := c.GetInt("token_quota")
            if tokenQuota > 100*preConsumedQuota {
                // token quota is ample, trust the token and skip pre-consumption
                preConsumedQuota = 0
                common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d quota %d and token %d quota %d are enough, trusted and no need to pre-consume", relayInfo.UserId, userQuota, relayInfo.TokenId, tokenQuota))
            }
        } else {
            // unlimited token and ample user quota: no need to pre-consume
            preConsumedQuota = 0
            common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d with unlimited token has enough quota %d, trusted and no need to pre-consume", relayInfo.UserId, userQuota))
        }
    }
    if preConsumedQuota > 0 {
        userQuota, err = model.PreConsumeTokenQuota(relayInfo.TokenId, preConsumedQuota)
        if err != nil {
            return 0, 0, service.OpenAIErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
        }
    }
    return preConsumedQuota, userQuota, nil
}
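
// returnPreConsumedQuota asynchronously refunds a pre-consumed amount, e.g. when
// the upstream request failed before any usage was billed.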
func returnPreConsumedQuota(c *gin.Context, tokenId int, userQuota int, preConsumedQuota int) {
    if preConsumedQuota != 0 {
        go func(ctx context.Context) {
            // refund by posting a negative consumption
            err := model.PostConsumeTokenQuota(tokenId, userQuota, -preConsumedQuota, 0, false)
            if err != nil {
                common.SysError("error returning pre-consumed quota: " + err.Error())
            }
        }(c)
    }
}
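
// postConsumeQuota settles the final cost once the upstream response has been
// handled: it computes the actual quota from usage (or the fixed model price),
// books the difference against the pre-consumed amount, refreshes caches and
// statistics, and writes the consume log.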
func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, textRequest dto.GeneralOpenAIRequest,
    usage *dto.Usage, ratio float64, preConsumedQuota int, userQuota int, modelRatio float64, groupRatio float64,
    modelPrice float64, sensitiveResp *dto.SensitiveResponse) {
    useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
    promptTokens := usage.PromptTokens
    completionTokens := usage.CompletionTokens
    tokenName := ctx.GetString("token_name")

    quota := 0
    if modelPrice == -1 {
        completionRatio := common.GetCompletionRatio(textRequest.Model)
        quota = promptTokens + int(float64(completionTokens)*completionRatio)
        quota = int(float64(quota) * ratio)
        if ratio != 0 && quota <= 0 {
            quota = 1
        }
    } else {
        quota = int(modelPrice * common.QuotaPerUnit * groupRatio)
    }
    totalTokens := promptTokens + completionTokens

    var logContent string
    if modelPrice == -1 {
        logContent = fmt.Sprintf("model ratio %.2f, group ratio %.2f", modelRatio, groupRatio)
    } else {
        logContent = fmt.Sprintf("model price %.2f, group ratio %.2f", modelPrice, groupRatio)
    }

    // record the consume log even if quota is 0
    if totalTokens == 0 {
        // something must have gone wrong upstream; we cannot simply return,
        // because the pre-consumed quota may still need to be written back
        quota = 0
        logContent += " (possibly an upstream timeout)"
        common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, textRequest.Model, preConsumedQuota))
    } else {
        if sensitiveResp != nil {
            logContent += fmt.Sprintf(", sensitive words: %s", strings.Join(sensitiveResp.SensitiveWords, ", "))
        }
        quotaDelta := quota - preConsumedQuota
        err := model.PostConsumeTokenQuota(relayInfo.TokenId, userQuota, quotaDelta, preConsumedQuota, true)
        if err != nil {
            common.LogError(ctx, "error consuming token remaining quota: "+err.Error())
        }
        err = model.CacheUpdateUserQuota(relayInfo.UserId)
        if err != nil {
            common.LogError(ctx, "error updating user quota cache: "+err.Error())
        }
        model.UpdateUserUsedQuotaAndRequestCount(relayInfo.UserId, quota)
        model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
    }

    logModel := textRequest.Model
    if strings.HasPrefix(logModel, "gpt-4-gizmo") {
        // collapse per-gizmo model names in the log, but keep the full name in the content
        logModel = "gpt-4-gizmo-*"
        logContent += fmt.Sprintf(", model %s", textRequest.Model)
    }
    model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream)
}