Просмотр исходного кода

feat: support DeepSeek V4 reasoning suffix handling (#4428)

Calcium-Ion 2 недели назад
Родитель
Commit
8993386743

+ 76 - 1
relay/channel/deepseek/adaptor.go

@@ -7,12 +7,14 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/QuantumNous/new-api/common"
 	"github.com/QuantumNous/new-api/dto"
 	"github.com/QuantumNous/new-api/relay/channel"
 	"github.com/QuantumNous/new-api/relay/channel/claude"
 	"github.com/QuantumNous/new-api/relay/channel/openai"
 	relaycommon "github.com/QuantumNous/new-api/relay/common"
 	"github.com/QuantumNous/new-api/relay/constant"
+	"github.com/QuantumNous/new-api/setting/reasoning"
 	"github.com/QuantumNous/new-api/types"
 	"github.com/gin-gonic/gin"
 )
@@ -27,7 +29,18 @@ func (a *Adaptor) ConvertGeminiRequest(*gin.Context, *relaycommon.RelayInfo, *dt
 
 func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, req *dto.ClaudeRequest) (any, error) {
 	adaptor := claude.Adaptor{}
-	return adaptor.ConvertClaudeRequest(c, info, req)
+	convertedRequest, err := adaptor.ConvertClaudeRequest(c, info, req)
+	if err != nil {
+		return nil, err
+	}
+	claudeRequest, ok := convertedRequest.(*dto.ClaudeRequest)
+	if !ok {
+		return convertedRequest, nil
+	}
+	if err := applyDeepSeekV4ClaudeThinkingSuffix(info, claudeRequest); err != nil {
+		return nil, err
+	}
+	return claudeRequest, nil
 }
 
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
@@ -71,9 +84,71 @@ func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayIn
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
+	if err := applyDeepSeekV4OpenAIThinkingSuffix(info, request); err != nil {
+		return nil, err
+	}
+
 	return request, nil
 }
 
+func applyDeepSeekV4OpenAIThinkingSuffix(info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) error {
+	modelName := request.Model
+	if info != nil && info.ChannelMeta != nil && info.UpstreamModelName != "" {
+		modelName = info.UpstreamModelName
+	}
+	baseModel, thinkingType, effort, ok := reasoning.ParseDeepSeekV4ThinkingSuffix(modelName)
+	if !ok {
+		return nil
+	}
+	thinking, err := common.Marshal(map[string]string{
+		"type": thinkingType,
+	})
+	if err != nil {
+		return fmt.Errorf("error marshalling thinking: %w", err)
+	}
+	request.Model = baseModel
+	request.THINKING = thinking
+	request.ReasoningEffort = effort
+	if info != nil {
+		if info.ChannelMeta != nil {
+			info.UpstreamModelName = baseModel
+		}
+		info.ReasoningEffort = effort
+	}
+	return nil
+}
+
+func applyDeepSeekV4ClaudeThinkingSuffix(info *relaycommon.RelayInfo, request *dto.ClaudeRequest) error {
+	modelName := request.Model
+	if info != nil && info.ChannelMeta != nil && info.UpstreamModelName != "" {
+		modelName = info.UpstreamModelName
+	}
+	baseModel, thinkingType, effort, ok := reasoning.ParseDeepSeekV4ThinkingSuffix(modelName)
+	if !ok {
+		return nil
+	}
+	request.Model = baseModel
+	request.Thinking = &dto.Thinking{Type: thinkingType}
+	if effort == "" {
+		request.OutputConfig = nil
+	} else {
+		outputConfig, err := common.Marshal(map[string]string{
+			"effort": effort,
+		})
+		if err != nil {
+			return fmt.Errorf("error marshalling output_config: %w", err)
+		}
+		request.OutputConfig = outputConfig
+	}
+	if info != nil {
+		if info.ChannelMeta != nil {
+			info.UpstreamModelName = baseModel
+		}
+		info.ReasoningEffort = effort
+	}
+	return nil
+}
+
// ConvertRerankRequest is a stub that returns (nil, nil): this adaptor
// produces no upstream rerank request and reports no error.
// NOTE(review): presumably rerank is unsupported for DeepSeek — confirm
// that callers treat a nil converted request accordingly.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	return nil, nil
}

+ 2 - 0
relay/channel/deepseek/constants.go

@@ -2,6 +2,8 @@ package deepseek
 
// ModelList enumerates the model names this channel advertises. The
// "-none"/"-max" suffixed V4 entries are thinking-mode variants of the
// corresponding base models — presumably resolved by the reasoning-suffix
// parser; confirm against setting/reasoning.
var ModelList = []string{
	"deepseek-chat", "deepseek-reasoner",
	"deepseek-v4-flash", "deepseek-v4-flash-none", "deepseek-v4-flash-max",
	"deepseek-v4-pro", "deepseek-v4-pro-none", "deepseek-v4-pro-max",
}

// ChannelName identifies this relay channel.
var ChannelName = "deepseek"

+ 3 - 17
relay/channel/openai/adaptor.go

@@ -28,6 +28,7 @@ import (
 	relayconstant "github.com/QuantumNous/new-api/relay/constant"
 	"github.com/QuantumNous/new-api/service"
 	"github.com/QuantumNous/new-api/setting/model_setting"
+	"github.com/QuantumNous/new-api/setting/reasoning"
 	"github.com/QuantumNous/new-api/types"
 	"github.com/samber/lo"
 
@@ -39,21 +40,6 @@ type Adaptor struct {
 	ResponseFormat string
 }
 
-// parseReasoningEffortFromModelSuffix 从模型名称中解析推理级别
-// support OAI models: o1-mini/o3-mini/o4-mini/o1/o3 etc...
-// minimal effort only available in gpt-5
-func parseReasoningEffortFromModelSuffix(model string) (string, string) {
-	effortSuffixes := []string{"-high", "-minimal", "-low", "-medium", "-none", "-xhigh"}
-	for _, suffix := range effortSuffixes {
-		if strings.HasSuffix(model, suffix) {
-			effort := strings.TrimPrefix(suffix, "-")
-			originModel := strings.TrimSuffix(model, suffix)
-			return effort, originModel
-		}
-	}
-	return "", model
-}
-
 func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
 	// 使用 service.GeminiToOpenAIRequest 转换请求格式
 	openaiRequest, err := service.GeminiToOpenAIRequest(request, info)
@@ -342,7 +328,7 @@ func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayIn
 		}
 
 		// 转换模型推理力度后缀
-		effort, originModel := parseReasoningEffortFromModelSuffix(info.UpstreamModelName)
+		effort, originModel := reasoning.ParseOpenAIReasoningEffortFromModelSuffix(info.UpstreamModelName)
 		if effort != "" {
 			request.ReasoningEffort = effort
 			info.UpstreamModelName = originModel
@@ -587,7 +573,7 @@ func detectImageMimeType(filename string) string {
 
 func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
 	//  转换模型推理力度后缀
-	effort, originModel := parseReasoningEffortFromModelSuffix(request.Model)
+	effort, originModel := reasoning.ParseOpenAIReasoningEffortFromModelSuffix(request.Model)
 	if effort != "" {
 		if request.Reasoning == nil {
 			request.Reasoning = &dto.Reasoning{

+ 32 - 1
setting/reasoning/suffix.go

@@ -8,9 +8,17 @@ import (
 
// EffortSuffixes are the generic reasoning-effort suffixes recognized on
// model names; the first suffix that matches (in this order) wins.
var EffortSuffixes = []string{"-max", "-xhigh", "-high", "-medium", "-low", "-minimal"}

// OpenAIEffortSuffixes are the effort suffixes accepted on OpenAI-style
// model names (note: includes "-none" and excludes "-max").
var OpenAIEffortSuffixes = []string{"-high", "-minimal", "-low", "-medium", "-none", "-xhigh"}

// DeepSeekV4EffortSuffixes are the thinking-mode suffixes recognized on
// DeepSeek V4 model names.
var DeepSeekV4EffortSuffixes = []string{"-none", "-max"}
+
// TrimEffortSuffix strips a recognized effort suffix (from EffortSuffixes)
// off modelName. It returns the base model name, the effort level without
// its leading dash (e.g. "low"), and whether a suffix was found.
func TrimEffortSuffix(modelName string) (string, string, bool) {
	return TrimEffortSuffixWithSuffixes(modelName, EffortSuffixes)
}
+
+func TrimEffortSuffixWithSuffixes(modelName string, suffixes []string) (string, string, bool) {
+	suffix, found := lo.Find(suffixes, func(s string) bool {
 		return strings.HasSuffix(modelName, s)
 	})
 	if !found {
@@ -18,3 +26,26 @@ func TrimEffortSuffix(modelName string) (string, string, bool) {
 	}
 	return strings.TrimSuffix(modelName, suffix), strings.TrimPrefix(suffix, "-"), true
 }
+
+func ParseOpenAIReasoningEffortFromModelSuffix(modelName string) (string, string) {
+	baseModel, effort, ok := TrimEffortSuffixWithSuffixes(modelName, OpenAIEffortSuffixes)
+	if !ok {
+		return "", modelName
+	}
+	return effort, baseModel
+}
+
+func ParseDeepSeekV4ThinkingSuffix(modelName string) (baseModel string, thinkingType string, effort string, ok bool) {
+	baseModel, suffix, ok := TrimEffortSuffixWithSuffixes(modelName, DeepSeekV4EffortSuffixes)
+	if !ok || !strings.HasPrefix(baseModel, "deepseek-v4-") {
+		return modelName, "", "", false
+	}
+	switch suffix {
+	case "none":
+		return baseModel, "disabled", "", true
+	case "max":
+		return baseModel, "enabled", "max", true
+	default:
+		return modelName, "", "", false
+	}
+}