// relay-text.go

package relay

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"strings"
	"time"

	"github.com/bytedance/sonic"
	"github.com/gin-gonic/gin"

	"one-api/common"
	"one-api/constant"
	"one-api/dto"
	"one-api/model"
	relaycommon "one-api/relay/common"
	relayconstant "one-api/relay/constant"
	"one-api/service"
)
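
// getAndValidateTextRequest unmarshals the request body into a GeneralOpenAIRequest,
// fills per-mode defaults (moderation and embedding model names), and rejects
// requests that are missing the fields their relay mode requires.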
func getAndValidateTextRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo) (*dto.GeneralOpenAIRequest, error) {
	textRequest := &dto.GeneralOpenAIRequest{}
	err := common.UnmarshalBodyReusable(c, textRequest)
	if err != nil {
		return nil, err
	}
	if relayInfo.RelayMode == relayconstant.RelayModeModerations && textRequest.Model == "" {
		textRequest.Model = "text-moderation-latest"
	}
	if relayInfo.RelayMode == relayconstant.RelayModeEmbeddings && textRequest.Model == "" {
		textRequest.Model = c.Param("model")
	}
	if textRequest.MaxTokens < 0 || textRequest.MaxTokens > math.MaxInt32/2 {
		return nil, errors.New("max_tokens is invalid")
	}
	if textRequest.Model == "" {
		return nil, errors.New("model is required")
	}
	switch relayInfo.RelayMode {
	case relayconstant.RelayModeCompletions:
		if textRequest.Prompt == "" {
			return nil, errors.New("field prompt is required")
		}
	case relayconstant.RelayModeChatCompletions:
		if len(textRequest.Messages) == 0 {
			return nil, errors.New("field messages is required")
		}
	case relayconstant.RelayModeEmbeddings:
		// no extra validation needed for embeddings
	case relayconstant.RelayModeModerations:
		if textRequest.Input == nil || textRequest.Input == "" {
			return nil, errors.New("field input is required")
		}
	case relayconstant.RelayModeEdits:
		if textRequest.Instruction == "" {
			return nil, errors.New("field instruction is required")
		}
	}
	relayInfo.IsStream = textRequest.Stream
	return textRequest, nil
}
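
// TextHelper drives one text relay round trip: it validates the request, applies
// the channel's model mapping, pre-consumes quota, converts and forwards the
// request through the channel adaptor, and finally settles the real cost from
// the returned usage. A non-nil return is an OpenAI-style error; the deferred
// hook refunds any pre-consumed quota in that case.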
func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
	relayInfo := relaycommon.GenRelayInfo(c)

	// get & validate textRequest
	textRequest, err := getAndValidateTextRequest(c, relayInfo)
	if err != nil {
		common.LogError(c, fmt.Sprintf("getAndValidateTextRequest failed: %s", err.Error()))
		return service.OpenAIErrorWrapperLocal(err, "invalid_text_request", http.StatusBadRequest)
	}

	// map model name
	modelMapping := c.GetString("model_mapping")
	if modelMapping != "" && modelMapping != "{}" {
		modelMap := make(map[string]string)
		err := json.Unmarshal([]byte(modelMapping), &modelMap)
		if err != nil {
			return service.OpenAIErrorWrapperLocal(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
		}
		if modelMap[textRequest.Model] != "" {
			textRequest.Model = modelMap[textRequest.Model]
		}
	}
	// set upstream model name
	relayInfo.UpstreamModelName = textRequest.Model

	modelPrice, getModelPriceSuccess := common.GetModelPrice(textRequest.Model, false)
	groupRatio := common.GetGroupRatio(relayInfo.Group)

	var preConsumedQuota int
	var ratio float64
	var modelRatio float64

	if constant.ShouldCheckPromptSensitive() {
		err = checkRequestSensitive(textRequest, relayInfo)
		if err != nil {
			return service.OpenAIErrorWrapperLocal(err, "sensitive_words_detected", http.StatusBadRequest)
		}
	}
	promptTokens, err := getPromptTokens(textRequest, relayInfo)
	// counting prompt tokens failed
	if err != nil {
		return service.OpenAIErrorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
	}

	// estimate the quota to pre-consume: token-based (ratio) or fixed price
	if !getModelPriceSuccess {
		preConsumedTokens := common.PreConsumedQuota
		if textRequest.MaxTokens != 0 {
			preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
		}
		modelRatio = common.GetModelRatio(textRequest.Model)
		ratio = modelRatio * groupRatio
		preConsumedQuota = int(float64(preConsumedTokens) * ratio)
	} else {
		preConsumedQuota = int(modelPrice * common.QuotaPerUnit * groupRatio)
	}
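
	// Worked example (illustrative numbers, not from the source): with
	// promptTokens=800, MaxTokens=1200, modelRatio=7.5 and groupRatio=1.0,
	// preConsumedQuota = int((800+1200) * 7.5) = 15000.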
	// pre-consume quota
	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, preConsumedQuota, relayInfo)
	if openaiErr != nil {
		return openaiErr
	}
	defer func() {
		if openaiErr != nil {
			returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
		}
	}()

	// check whether the caller asked for usage to be returned
	includeUsage := false
	if textRequest.StreamOptions != nil && textRequest.StreamOptions.IncludeUsage {
		includeUsage = true
	}
	// if StreamOptions are not supported (or this is not a streaming request), drop them
	if !relayInfo.SupportStreamOptions || !textRequest.Stream {
		textRequest.StreamOptions = nil
	} else {
		// StreamOptions are supported; when ForceStreamOption is enabled, always request usage
		if constant.ForceStreamOption {
			textRequest.StreamOptions = &dto.StreamOptions{
				IncludeUsage: true,
			}
		}
	}
	if includeUsage {
		relayInfo.ShouldIncludeUsage = true
	}
	adaptor := GetAdaptor(relayInfo.ApiType)
	if adaptor == nil {
		return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
	}
	adaptor.Init(relayInfo)

	var requestBody io.Reader
	convertedRequest, err := adaptor.ConvertRequest(c, relayInfo, textRequest)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
	}
	jsonData, err := sonic.Marshal(convertedRequest)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
	}
	requestBody = bytes.NewBuffer(jsonData)
	statusCodeMappingStr := c.GetString("status_code_mapping")
	var httpResp *http.Response
	resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
	if err != nil {
		return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}
	if resp != nil {
		httpResp = resp.(*http.Response)
		relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
		if httpResp.StatusCode != http.StatusOK {
			openaiErr = service.RelayErrorHandler(httpResp)
			// remap the status code according to the channel's mapping
			service.ResetStatusCode(openaiErr, statusCodeMappingStr)
			return openaiErr
		}
	}

	usage, openaiErr := adaptor.DoResponse(c, httpResp, relayInfo)
	if openaiErr != nil {
		// remap the status code according to the channel's mapping
		service.ResetStatusCode(openaiErr, statusCodeMappingStr)
		return openaiErr
	}

	if strings.HasPrefix(relayInfo.UpstreamModelName, "gpt-4o-audio") {
		service.PostAudioConsumeQuota(c, relayInfo, usage.(*dto.Usage), ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, getModelPriceSuccess, "")
	} else {
		postConsumeQuota(c, relayInfo, textRequest.Model, usage.(*dto.Usage), ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, getModelPriceSuccess, "")
	}
	return nil
}
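
// getPromptTokens counts prompt tokens for the request according to its relay
// mode and stores the result on the RelayInfo.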
func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (int, error) {
	var promptTokens int
	var err error
	switch info.RelayMode {
	case relayconstant.RelayModeChatCompletions:
		promptTokens, err = service.CountTokenChatRequest(*textRequest, textRequest.Model)
	case relayconstant.RelayModeCompletions:
		promptTokens, err = service.CountTokenInput(textRequest.Prompt, textRequest.Model)
	case relayconstant.RelayModeModerations, relayconstant.RelayModeEmbeddings:
		promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model)
	default:
		err = errors.New("unknown relay mode")
		promptTokens = 0
	}
	info.PromptTokens = promptTokens
	return promptTokens, err
}
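
// checkRequestSensitive runs the sensitive-word check against whichever field
// carries user content for the current relay mode.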
func checkRequestSensitive(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) error {
	var err error
	switch info.RelayMode {
	case relayconstant.RelayModeChatCompletions:
		err = service.CheckSensitiveMessages(textRequest.Messages)
	case relayconstant.RelayModeCompletions:
		err = service.CheckSensitiveInput(textRequest.Prompt)
	case relayconstant.RelayModeModerations, relayconstant.RelayModeEmbeddings:
		err = service.CheckSensitiveInput(textRequest.Input)
	}
	return err
}
// preConsumeQuota pre-deducts quota and returns the user's remaining quota
func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommon.RelayInfo) (int, int, *dto.OpenAIErrorWithStatusCode) {
	userQuota, err := model.CacheGetUserQuota(relayInfo.UserId)
	if err != nil {
		return 0, 0, service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota <= 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
	}
	if userQuota-preConsumedQuota < 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(fmt.Errorf("chat pre-consumed quota failed, user quota: %d, need quota: %d", userQuota, preConsumedQuota), "insufficient_user_quota", http.StatusBadRequest)
	}
	err = model.CacheDecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
	if err != nil {
		return 0, 0, service.OpenAIErrorWrapperLocal(err, "decrease_user_quota_failed", http.StatusInternalServerError)
	}
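
	// Illustrative example (numbers assumed, not from the source): with
	// preConsumedQuota=2000, a user quota above 200000 counts as ample; if the
	// token clears the same bar (or is unlimited), pre-consumption is skipped below.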
	if userQuota > 100*preConsumedQuota {
		// the user's quota is ample; check whether the token's quota is too
		if !relayInfo.TokenUnlimited {
			// limited token: skip pre-consumption only if the token quota is also ample
			tokenQuota := c.GetInt("token_quota")
			if tokenQuota > 100*preConsumedQuota {
				// token quota is ample, trust the token
				preConsumedQuota = 0
				common.LogInfo(c, fmt.Sprintf("user %d quota %d and token %d quota %d are enough, trusted and no need to pre-consume", relayInfo.UserId, userQuota, relayInfo.TokenId, tokenQuota))
			}
		} else {
			// unlimited token and the user has enough quota: no need to pre-consume
			preConsumedQuota = 0
			common.LogInfo(c, fmt.Sprintf("user %d with unlimited token has enough quota %d, trusted and no need to pre-consume", relayInfo.UserId, userQuota))
		}
	}
	if preConsumedQuota > 0 {
		userQuota, err = model.PreConsumeTokenQuota(relayInfo, preConsumedQuota)
		if err != nil {
			return 0, 0, service.OpenAIErrorWrapperLocal(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
	}
	return preConsumedQuota, userQuota, nil
}
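
// returnPreConsumedQuota asynchronously refunds the pre-consumed quota after a
// failed relay by posting a negative quota delta.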
func returnPreConsumedQuota(c *gin.Context, relayInfo *relaycommon.RelayInfo, userQuota int, preConsumedQuota int) {
	if preConsumedQuota != 0 {
		go func(ctx context.Context) {
			// return pre-consumed quota
			err := model.PostConsumeTokenQuota(relayInfo, userQuota, -preConsumedQuota, 0, false)
			if err != nil {
				common.SysError("error return pre-consumed quota: " + err.Error())
			}
		}(c)
	}
}
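
// postConsumeQuota settles the final cost of a relay: it derives the real quota
// from the usage (token-based, or price-based when usePrice is set), posts the
// difference against the pre-consumed amount, updates user and channel counters,
// and records the consume log.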
func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, modelName string,
	usage *dto.Usage, ratio float64, preConsumedQuota int, userQuota int, modelRatio float64, groupRatio float64,
	modelPrice float64, usePrice bool, extraContent string) {
	if usage == nil {
		// no usage returned: bill only the prompt tokens and flag the log entry
		usage = &dto.Usage{
			PromptTokens:     relayInfo.PromptTokens,
			CompletionTokens: 0,
			TotalTokens:      relayInfo.PromptTokens,
		}
		extraContent += " ,(可能是请求出错)"
	}
	useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
	promptTokens := usage.PromptTokens
	completionTokens := usage.CompletionTokens
	tokenName := ctx.GetString("token_name")
	completionRatio := common.GetCompletionRatio(modelName)

	quota := 0
	if !usePrice {
		quota = promptTokens + int(math.Round(float64(completionTokens)*completionRatio))
		quota = int(math.Round(float64(quota) * ratio))
		if ratio != 0 && quota <= 0 {
			quota = 1
		}
	} else {
		quota = int(modelPrice * common.QuotaPerUnit * groupRatio)
	}
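
	// Worked example (illustrative numbers, not from the source): with
	// promptTokens=1000, completionTokens=500, completionRatio=3.0 and ratio=0.5,
	// quota = round((1000 + round(500*3.0)) * 0.5) = 1250.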
	totalTokens := promptTokens + completionTokens

	var logContent string
	if !usePrice {
		logContent = fmt.Sprintf("模型倍率 %.2f,补全倍率 %.2f,分组倍率 %.2f", modelRatio, completionRatio, groupRatio)
	} else {
		logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
	}
	// record the consume log even if quota is 0
	if totalTokens == 0 {
		// in this case some error must have happened; we cannot just return,
		// because the pre-consumed quota may still need to be refunded
		quota = 0
		logContent += "(可能是上游超时)"
		common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, "+
			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, modelName, preConsumedQuota))
	} else {
		quotaDelta := quota - preConsumedQuota
		if quotaDelta != 0 {
			err := model.PostConsumeTokenQuota(relayInfo, userQuota, quotaDelta, preConsumedQuota, true)
			if err != nil {
				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
			}
		}
		err := model.CacheUpdateUserQuota(relayInfo.UserId)
		if err != nil {
			common.LogError(ctx, "error update user quota cache: "+err.Error())
		}
		model.UpdateUserUsedQuotaAndRequestCount(relayInfo.UserId, quota)
		model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
	}

	// collapse per-user gizmo model names into a wildcard for the log
	logModel := modelName
	if strings.HasPrefix(logModel, "gpt-4-gizmo") {
		logModel = "gpt-4-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if strings.HasPrefix(logModel, "gpt-4o-gizmo") {
		logModel = "gpt-4o-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if extraContent != "" {
		logContent += ", " + extraContent
	}
	other := service.GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, modelPrice)
	model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, other)
}
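
// Usage sketch (an assumption about the surrounding router, not part of this
// file): TextHelper is written to be called from a gin handler, with the
// returned *dto.OpenAIErrorWithStatusCode rendered back to the client, roughly:
//
//	router.POST("/v1/chat/completions", func(c *gin.Context) {
//		if openaiErr := relay.TextHelper(c); openaiErr != nil {
//			c.JSON(openaiErr.StatusCode, gin.H{"error": openaiErr.Error})
//		}
//	})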