// claude_handler.go
package relay

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"

	"one-api/common"
	"one-api/dto"
	relaycommon "one-api/relay/common"
	"one-api/relay/helper"
	"one-api/service"
	"one-api/setting/model_setting"
)
  18. func getAndValidateClaudeRequest(c *gin.Context) (textRequest *dto.ClaudeRequest, err error) {
  19. textRequest = &dto.ClaudeRequest{}
  20. err = c.ShouldBindJSON(textRequest)
  21. if err != nil {
  22. return nil, err
  23. }
  24. if textRequest.Messages == nil || len(textRequest.Messages) == 0 {
  25. return nil, errors.New("field messages is required")
  26. }
  27. if textRequest.Model == "" {
  28. return nil, errors.New("field model is required")
  29. }
  30. return textRequest, nil
  31. }
  32. func ClaudeHelper(c *gin.Context) (claudeError *dto.ClaudeErrorWithStatusCode) {
  33. relayInfo := relaycommon.GenRelayInfoClaude(c)
  34. // get & validate textRequest 获取并验证文本请求
  35. textRequest, err := getAndValidateClaudeRequest(c)
  36. if err != nil {
  37. return service.ClaudeErrorWrapperLocal(err, "invalid_claude_request", http.StatusBadRequest)
  38. }
  39. if textRequest.Stream {
  40. relayInfo.IsStream = true
  41. }
  42. err = helper.ModelMappedHelper(c, relayInfo)
  43. if err != nil {
  44. return service.ClaudeErrorWrapperLocal(err, "model_mapped_error", http.StatusInternalServerError)
  45. }
  46. textRequest.Model = relayInfo.UpstreamModelName
  47. promptTokens, err := getClaudePromptTokens(textRequest, relayInfo)
  48. // count messages token error 计算promptTokens错误
  49. if err != nil {
  50. return service.ClaudeErrorWrapperLocal(err, "count_token_messages_failed", http.StatusInternalServerError)
  51. }
  52. priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
  53. if err != nil {
  54. return service.ClaudeErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
  55. }
  56. // pre-consume quota 预消耗配额
  57. preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
  58. if openaiErr != nil {
  59. return service.OpenAIErrorToClaudeError(openaiErr)
  60. }
  61. defer func() {
  62. if openaiErr != nil {
  63. returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
  64. }
  65. }()
  66. adaptor := GetAdaptor(relayInfo.ApiType)
  67. if adaptor == nil {
  68. return service.ClaudeErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
  69. }
  70. adaptor.Init(relayInfo)
  71. var requestBody io.Reader
  72. if textRequest.MaxTokens == 0 {
  73. textRequest.MaxTokens = uint(model_setting.GetClaudeSettings().GetDefaultMaxTokens(textRequest.Model))
  74. }
  75. if model_setting.GetClaudeSettings().ThinkingAdapterEnabled &&
  76. strings.HasSuffix(textRequest.Model, "-thinking") {
  77. if textRequest.Thinking == nil {
  78. // 因为BudgetTokens 必须大于1024
  79. if textRequest.MaxTokens < 1280 {
  80. textRequest.MaxTokens = 1280
  81. }
  82. // BudgetTokens 为 max_tokens 的 80%
  83. textRequest.Thinking = &dto.Thinking{
  84. Type: "enabled",
  85. BudgetTokens: int(float64(textRequest.MaxTokens) * model_setting.GetClaudeSettings().ThinkingAdapterBudgetTokensPercentage),
  86. }
  87. // TODO: 临时处理
  88. // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#important-considerations-when-using-extended-thinking
  89. textRequest.TopP = 0
  90. textRequest.Temperature = common.GetPointer[float64](1.0)
  91. }
  92. textRequest.Model = strings.TrimSuffix(textRequest.Model, "-thinking")
  93. relayInfo.UpstreamModelName = textRequest.Model
  94. }
  95. convertedRequest, err := adaptor.ConvertClaudeRequest(c, relayInfo, textRequest)
  96. if err != nil {
  97. return service.ClaudeErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
  98. }
  99. jsonData, err := json.Marshal(convertedRequest)
  100. if common.DebugEnabled {
  101. println("requestBody: ", string(jsonData))
  102. }
  103. if err != nil {
  104. return service.ClaudeErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
  105. }
  106. requestBody = bytes.NewBuffer(jsonData)
  107. statusCodeMappingStr := c.GetString("status_code_mapping")
  108. var httpResp *http.Response
  109. resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
  110. if err != nil {
  111. return service.ClaudeErrorWrapperLocal(err, "do_request_failed", http.StatusInternalServerError)
  112. }
  113. if resp != nil {
  114. httpResp = resp.(*http.Response)
  115. relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
  116. if httpResp.StatusCode != http.StatusOK {
  117. openaiErr = service.RelayErrorHandler(httpResp, false)
  118. // reset status code 重置状态码
  119. service.ResetStatusCode(openaiErr, statusCodeMappingStr)
  120. return service.OpenAIErrorToClaudeError(openaiErr)
  121. }
  122. }
  123. usage, openaiErr := adaptor.DoResponse(c, httpResp, relayInfo)
  124. //log.Printf("usage: %v", usage)
  125. if openaiErr != nil {
  126. // reset status code 重置状态码
  127. service.ResetStatusCode(openaiErr, statusCodeMappingStr)
  128. return service.OpenAIErrorToClaudeError(openaiErr)
  129. }
  130. service.PostClaudeConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
  131. return nil
  132. }
  133. func getClaudePromptTokens(textRequest *dto.ClaudeRequest, info *relaycommon.RelayInfo) (int, error) {
  134. var promptTokens int
  135. var err error
  136. switch info.RelayMode {
  137. default:
  138. promptTokens, err = service.CountTokenClaudeRequest(*textRequest, info.UpstreamModelName)
  139. }
  140. info.PromptTokens = promptTokens
  141. return promptTokens, err
  142. }