relay-text.go

package relay

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"strings"
	"time"

	"github.com/bytedance/gopkg/util/gopool"
	"github.com/gin-gonic/gin"

	"one-api/common"
	"one-api/constant"
	"one-api/dto"
	"one-api/model"
	relaycommon "one-api/relay/common"
	relayconstant "one-api/relay/constant"
	"one-api/relay/helper"
	"one-api/service"
	"one-api/setting"
)
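
// getAndValidateTextRequest parses the OpenAI-style request body, fills in
// mode-specific defaults (e.g. the moderation model), and rejects requests
// that are missing fields required by the current relay mode.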
func getAndValidateTextRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo) (*dto.GeneralOpenAIRequest, error) {
	textRequest := &dto.GeneralOpenAIRequest{}
	err := common.UnmarshalBodyReusable(c, textRequest)
	if err != nil {
		return nil, err
	}
	if relayInfo.RelayMode == relayconstant.RelayModeModerations && textRequest.Model == "" {
		textRequest.Model = "text-moderation-latest"
	}
	if relayInfo.RelayMode == relayconstant.RelayModeEmbeddings && textRequest.Model == "" {
		textRequest.Model = c.Param("model")
	}
	if textRequest.MaxTokens > math.MaxInt32/2 {
		return nil, errors.New("max_tokens is invalid")
	}
	if textRequest.Model == "" {
		return nil, errors.New("model is required")
	}
	switch relayInfo.RelayMode {
	case relayconstant.RelayModeCompletions:
		if textRequest.Prompt == "" {
			return nil, errors.New("field prompt is required")
		}
	case relayconstant.RelayModeChatCompletions:
		if len(textRequest.Messages) == 0 {
			return nil, errors.New("field messages is required")
		}
	case relayconstant.RelayModeEmbeddings:
	case relayconstant.RelayModeModerations:
		if textRequest.Input == nil || textRequest.Input == "" {
			return nil, errors.New("field input is required")
		}
	case relayconstant.RelayModeEdits:
		if textRequest.Instruction == "" {
			return nil, errors.New("field instruction is required")
		}
	}
	relayInfo.IsStream = textRequest.Stream
	return textRequest, nil
}
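
// TextHelper drives the full text relay pipeline: it validates the request,
// runs the optional sensitive-word check, resolves model mapping and pricing,
// pre-consumes quota, forwards the request through the channel adaptor, and
// finally settles quota based on the returned usage.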
func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
	relayInfo := relaycommon.GenRelayInfo(c)

	// get & validate textRequest
	textRequest, err := getAndValidateTextRequest(c, relayInfo)
	if err != nil {
		common.LogError(c, fmt.Sprintf("getAndValidateTextRequest failed: %s", err.Error()))
		return service.OpenAIErrorWrapperLocal(err, "invalid_text_request", http.StatusBadRequest)
	}

	if setting.ShouldCheckPromptSensitive() {
		words, err := checkRequestSensitive(textRequest, relayInfo)
		if err != nil {
			common.LogWarn(c, fmt.Sprintf("user sensitive words detected: %s", strings.Join(words, ", ")))
			return service.OpenAIErrorWrapperLocal(err, "sensitive_words_detected", http.StatusBadRequest)
		}
	}

	err = helper.ModelMappedHelper(c, relayInfo)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "model_mapped_error", http.StatusInternalServerError)
	}
	textRequest.Model = relayInfo.UpstreamModelName

	// Reuse promptTokens from the context if it has already been counted.
	var promptTokens int
	if value, exists := c.Get("prompt_tokens"); exists {
		promptTokens = value.(int)
		relayInfo.PromptTokens = promptTokens
	} else {
		promptTokens, err = getPromptTokens(textRequest, relayInfo)
		// counting message tokens failed
		if err != nil {
			return service.OpenAIErrorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
		}
		c.Set("prompt_tokens", promptTokens)
	}
	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
	}

	// pre-consume quota
	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
	if openaiErr != nil {
		return openaiErr
	}
	defer func() {
		if openaiErr != nil {
			returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
		}
	}()

	// Check whether the caller asked for usage to be returned in the stream.
	includeUsage := false
	if textRequest.StreamOptions != nil && textRequest.StreamOptions.IncludeUsage {
		includeUsage = true
	}
	// Drop StreamOptions when the channel does not support it or the request is not streaming.
	if !relayInfo.SupportStreamOptions || !textRequest.Stream {
		textRequest.StreamOptions = nil
	} else {
		// StreamOptions is supported; when ForceStreamOption is enabled, always request usage in the stream.
		if constant.ForceStreamOption {
			textRequest.StreamOptions = &dto.StreamOptions{
				IncludeUsage: true,
			}
		}
	}
	if includeUsage {
		relayInfo.ShouldIncludeUsage = true
	}

	adaptor := GetAdaptor(relayInfo.ApiType)
	if adaptor == nil {
		return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
	}
	adaptor.Init(relayInfo)

	var requestBody io.Reader
	//if relayInfo.ChannelType == common.ChannelTypeOpenAI && !isModelMapped {
	//	body, err := common.GetRequestBody(c)
	//	if err != nil {
	//		return service.OpenAIErrorWrapperLocal(err, "get_request_body_failed", http.StatusInternalServerError)
	//	}
	//	requestBody = bytes.NewBuffer(body)
	//} else {
	//
	//}
	convertedRequest, err := adaptor.ConvertRequest(c, relayInfo, textRequest)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
	}
	jsonData, err := json.Marshal(convertedRequest)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
	}
	requestBody = bytes.NewBuffer(jsonData)

	statusCodeMappingStr := c.GetString("status_code_mapping")
	var httpResp *http.Response
	resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
	if err != nil {
		return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}
	if resp != nil {
		httpResp = resp.(*http.Response)
		relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
		if httpResp.StatusCode != http.StatusOK {
			openaiErr = service.RelayErrorHandler(httpResp)
			// reset status code
			service.ResetStatusCode(openaiErr, statusCodeMappingStr)
			return openaiErr
		}
	}

	usage, openaiErr := adaptor.DoResponse(c, httpResp, relayInfo)
	if openaiErr != nil {
		// reset status code
		service.ResetStatusCode(openaiErr, statusCodeMappingStr)
		return openaiErr
	}

	if strings.HasPrefix(relayInfo.OriginModelName, "gpt-4o-audio") {
		service.PostAudioConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
	} else {
		postConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
	}
	return nil
}
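
// getPromptTokens counts the prompt tokens for the current relay mode and
// stores the result on the relay info for later quota settlement.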
func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (int, error) {
	var promptTokens int
	var err error
	switch info.RelayMode {
	case relayconstant.RelayModeChatCompletions:
		promptTokens, err = service.CountTokenChatRequest(info, *textRequest)
	case relayconstant.RelayModeCompletions:
		promptTokens, err = service.CountTokenInput(textRequest.Prompt, textRequest.Model)
	case relayconstant.RelayModeModerations:
		promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model)
	case relayconstant.RelayModeEmbeddings:
		promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model)
	default:
		err = errors.New("unknown relay mode")
		promptTokens = 0
	}
	info.PromptTokens = promptTokens
	return promptTokens, err
}
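
// checkRequestSensitive runs the configured sensitive-word check against the
// part of the request that carries user content for the current relay mode.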
func checkRequestSensitive(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) ([]string, error) {
	var err error
	var words []string
	switch info.RelayMode {
	case relayconstant.RelayModeChatCompletions:
		words, err = service.CheckSensitiveMessages(textRequest.Messages)
	case relayconstant.RelayModeCompletions:
		words, err = service.CheckSensitiveInput(textRequest.Prompt)
	case relayconstant.RelayModeModerations:
		words, err = service.CheckSensitiveInput(textRequest.Input)
	case relayconstant.RelayModeEmbeddings:
		words, err = service.CheckSensitiveInput(textRequest.Input)
	}
	return words, err
}

// preConsumeQuota pre-deducts quota for the request and returns the amount
// actually pre-consumed together with the user's remaining quota.
func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommon.RelayInfo) (int, int, *dto.OpenAIErrorWithStatusCode) {
	userQuota, err := model.GetUserQuota(relayInfo.UserId, false)
	if err != nil {
		return 0, 0, service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota <= 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
	}
	if userQuota-preConsumedQuota < 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(fmt.Errorf("chat pre-consumed quota failed, user quota: %s, need quota: %s", common.FormatQuota(userQuota), common.FormatQuota(preConsumedQuota)), "insufficient_user_quota", http.StatusForbidden)
	}
	relayInfo.UserQuota = userQuota
	if userQuota > 100*preConsumedQuota {
		// The user's quota is ample; check whether the token's quota is as well.
		if !relayInfo.TokenUnlimited {
			// Limited token: only skip pre-consumption if the token quota is also ample.
			tokenQuota := c.GetInt("token_quota")
			if tokenQuota > 100*preConsumedQuota {
				// Token quota is ample; trust it and skip pre-consumption.
				preConsumedQuota = 0
				common.LogInfo(c, fmt.Sprintf("user %d quota %s and token %d quota %d are enough, trusted and no need to pre-consume", relayInfo.UserId, common.FormatQuota(userQuota), relayInfo.TokenId, tokenQuota))
			}
		} else {
			// Unlimited token and the user has enough quota, so skip pre-consumption.
			preConsumedQuota = 0
			common.LogInfo(c, fmt.Sprintf("user %d with unlimited token has enough quota %s, trusted and no need to pre-consume", relayInfo.UserId, common.FormatQuota(userQuota)))
		}
	}
	if preConsumedQuota > 0 {
		err := service.PreConsumeTokenQuota(relayInfo, preConsumedQuota)
		if err != nil {
			return 0, 0, service.OpenAIErrorWrapperLocal(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
		err = model.DecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
		if err != nil {
			return 0, 0, service.OpenAIErrorWrapperLocal(err, "decrease_user_quota_failed", http.StatusInternalServerError)
		}
	}
	return preConsumedQuota, userQuota, nil
}
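
// returnPreConsumedQuota asynchronously refunds quota that was pre-consumed
// for a request that ultimately failed.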
func returnPreConsumedQuota(c *gin.Context, relayInfo *relaycommon.RelayInfo, userQuota int, preConsumedQuota int) {
	if preConsumedQuota != 0 {
		gopool.Go(func() {
			relayInfoCopy := *relayInfo
			err := service.PostConsumeQuota(&relayInfoCopy, -preConsumedQuota, 0, false)
			if err != nil {
				common.SysError("error return pre-consumed quota: " + err.Error())
			}
		})
	}
}
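
// postConsumeQuota settles the final quota for a completed request: it
// computes the cost from usage and pricing, applies the difference against the
// pre-consumed amount, updates user and channel statistics, and records the
// consumption log.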
func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
	usage *dto.Usage, preConsumedQuota int, userQuota int, priceData helper.PriceData, extraContent string) {
	if usage == nil {
		usage = &dto.Usage{
			PromptTokens:     relayInfo.PromptTokens,
			CompletionTokens: 0,
			TotalTokens:      relayInfo.PromptTokens,
		}
		extraContent += " ,(可能是请求出错)"
	}
	useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
	promptTokens := usage.PromptTokens
	completionTokens := usage.CompletionTokens
	modelName := relayInfo.OriginModelName
	tokenName := ctx.GetString("token_name")
	completionRatio := setting.GetCompletionRatio(modelName)
	ratio := priceData.ModelRatio * priceData.GroupRatio
	modelRatio := priceData.ModelRatio
	groupRatio := priceData.GroupRatio
	modelPrice := priceData.ModelPrice
	usePrice := priceData.UsePrice

	quota := 0
	if !priceData.UsePrice {
		quota = promptTokens + int(math.Round(float64(completionTokens)*completionRatio))
		quota = int(math.Round(float64(quota) * ratio))
		if ratio != 0 && quota <= 0 {
			quota = 1
		}
	} else {
		quota = int(modelPrice * common.QuotaPerUnit * groupRatio)
	}
	totalTokens := promptTokens + completionTokens

	var logContent string
	if !usePrice {
		logContent = fmt.Sprintf("模型倍率 %.2f,补全倍率 %.2f,分组倍率 %.2f", modelRatio, completionRatio, groupRatio)
	} else {
		logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
	}

	// record the consume log even if quota is 0
	if totalTokens == 0 {
		// In this case some error must have happened; we cannot simply return,
		// because the pre-consumed quota may still have to be refunded.
		quota = 0
		logContent += "(可能是上游超时)"
		common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, "+
			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, modelName, preConsumedQuota))
	} else {
		//if sensitiveResp != nil {
		//	logContent += fmt.Sprintf(",敏感词:%s", strings.Join(sensitiveResp.SensitiveWords, ", "))
		//}
		quotaDelta := quota - preConsumedQuota
		if quotaDelta != 0 {
			err := service.PostConsumeQuota(relayInfo, quotaDelta, preConsumedQuota, true)
			if err != nil {
				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
			}
		}
		model.UpdateUserUsedQuotaAndRequestCount(relayInfo.UserId, quota)
		model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
	}

	logModel := modelName
	if strings.HasPrefix(logModel, "gpt-4-gizmo") {
		logModel = "gpt-4-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if strings.HasPrefix(logModel, "gpt-4o-gizmo") {
		logModel = "gpt-4o-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if extraContent != "" {
		logContent += ", " + extraContent
	}
	other := service.GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, modelPrice)
	model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
	//if quota != 0 {
	//
	//}
}