Просмотр исходного кода

Merge remote-tracking branch 'origin/main'

CaIon 10 месяцев назад
Родитель
Commit
4ecf5dde14

+ 25 - 1
controller/user.go

@@ -592,7 +592,14 @@ func UpdateSelf(c *gin.Context) {
 		user.Password = "" // rollback to what it should be
 		cleanUser.Password = ""
 	}
-	updatePassword := user.Password != ""
+	updatePassword, err := checkUpdatePassword(user.OriginalPassword, user.Password, cleanUser.Id)
+	if err != nil {
+		c.JSON(http.StatusOK, gin.H{
+			"success": false,
+			"message": err.Error(),
+		})
+		return
+	}
 	if err := cleanUser.Update(updatePassword); err != nil {
 		c.JSON(http.StatusOK, gin.H{
 			"success": false,
@@ -608,6 +615,23 @@ func UpdateSelf(c *gin.Context) {
 	return
 }
 
+func checkUpdatePassword(originalPassword string, newPassword string, userId int) (updatePassword bool, err error) {
+	var currentUser *model.User
+	currentUser, err = model.GetUserById(userId, true)
+	if err != nil {
+		return
+	}
+	if !common.ValidatePasswordAndHash(originalPassword, currentUser.Password) {
+		err = fmt.Errorf("原密码错误")
+		return
+	}
+	if newPassword == "" {
+		return
+	}
+	updatePassword = true
+	return
+}
+
 func DeleteUser(c *gin.Context) {
 	id, err := strconv.Atoi(c.Param("id"))
 	if err != nil {

+ 30 - 25
dto/openai_response.go

@@ -195,28 +195,28 @@ type OutputTokenDetails struct {
 }
 
 type OpenAIResponsesResponse struct {
-	ID                 string             `json:"id"`
-	Object             string             `json:"object"`
-	CreatedAt          int                `json:"created_at"`
-	Status             string             `json:"status"`
-	Error              *OpenAIError       `json:"error,omitempty"`
-	IncompleteDetails  *IncompleteDetails `json:"incomplete_details,omitempty"`
-	Instructions       string             `json:"instructions"`
-	MaxOutputTokens    int                `json:"max_output_tokens"`
-	Model              string             `json:"model"`
-	Output             []ResponsesOutput  `json:"output"`
-	ParallelToolCalls  bool               `json:"parallel_tool_calls"`
-	PreviousResponseID string             `json:"previous_response_id"`
-	Reasoning          *Reasoning         `json:"reasoning"`
-	Store              bool               `json:"store"`
-	Temperature        float64            `json:"temperature"`
-	ToolChoice         string             `json:"tool_choice"`
-	Tools              []interface{}      `json:"tools"`
-	TopP               float64            `json:"top_p"`
-	Truncation         string             `json:"truncation"`
-	Usage              *Usage             `json:"usage"`
-	User               json.RawMessage    `json:"user"`
-	Metadata           json.RawMessage    `json:"metadata"`
+	ID                 string               `json:"id"`
+	Object             string               `json:"object"`
+	CreatedAt          int                  `json:"created_at"`
+	Status             string               `json:"status"`
+	Error              *OpenAIError         `json:"error,omitempty"`
+	IncompleteDetails  *IncompleteDetails   `json:"incomplete_details,omitempty"`
+	Instructions       string               `json:"instructions"`
+	MaxOutputTokens    int                  `json:"max_output_tokens"`
+	Model              string               `json:"model"`
+	Output             []ResponsesOutput    `json:"output"`
+	ParallelToolCalls  bool                 `json:"parallel_tool_calls"`
+	PreviousResponseID string               `json:"previous_response_id"`
+	Reasoning          *Reasoning           `json:"reasoning"`
+	Store              bool                 `json:"store"`
+	Temperature        float64              `json:"temperature"`
+	ToolChoice         string               `json:"tool_choice"`
+	Tools              []ResponsesToolsCall `json:"tools"`
+	TopP               float64              `json:"top_p"`
+	Truncation         string               `json:"truncation"`
+	Usage              *Usage               `json:"usage"`
+	User               json.RawMessage      `json:"user"`
+	Metadata           json.RawMessage      `json:"metadata"`
 }
 
 type IncompleteDetails struct {
@@ -238,8 +238,12 @@ type ResponsesOutputContent struct {
 }
 
 const (
-	BuildInTools_WebSearch  = "web_search_preview"
-	BuildInTools_FileSearch = "file_search"
+	BuildInToolWebSearchPreview = "web_search_preview"
+	BuildInToolFileSearch       = "file_search"
+)
+
+const (
+	BuildInCallWebSearchCall = "web_search_call"
 )
 
 const (
@@ -250,6 +254,7 @@ const (
 // ResponsesStreamResponse 用于处理 /v1/responses 流式响应
 type ResponsesStreamResponse struct {
 	Type     string                   `json:"type"`
-	Response *OpenAIResponsesResponse `json:"response"`
+	Response *OpenAIResponsesResponse `json:"response,omitempty"`
 	Delta    string                   `json:"delta,omitempty"`
+	Item     *ResponsesOutput         `json:"item,omitempty"`
 }

+ 21 - 19
middleware/model-rate-limit.go

@@ -93,25 +93,27 @@ func redisRateLimitHandler(duration int64, totalMaxCount, successMaxCount int) g
 		}
 
 		//2.检查总请求数限制并记录总请求(当totalMaxCount为0时会自动跳过,使用令牌桶限流器
-		totalKey := fmt.Sprintf("rateLimit:%s", userId)
-		// 初始化
-		tb := limiter.New(ctx, rdb)
-		allowed, err = tb.Allow(
-			ctx,
-			totalKey,
-			limiter.WithCapacity(int64(totalMaxCount)*duration),
-			limiter.WithRate(int64(totalMaxCount)),
-			limiter.WithRequested(duration),
-		)
-
-		if err != nil {
-			fmt.Println("检查总请求数限制失败:", err.Error())
-			abortWithOpenAiMessage(c, http.StatusInternalServerError, "rate_limit_check_failed")
-			return
-		}
-
-		if !allowed {
-			abortWithOpenAiMessage(c, http.StatusTooManyRequests, fmt.Sprintf("您已达到总请求数限制:%d分钟内最多请求%d次,包括失败次数,请检查您的请求是否正确", setting.ModelRequestRateLimitDurationMinutes, totalMaxCount))
+		if totalMaxCount > 0 {
+			totalKey := fmt.Sprintf("rateLimit:%s", userId)
+			// 初始化
+			tb := limiter.New(ctx, rdb)
+			allowed, err = tb.Allow(
+				ctx,
+				totalKey,
+				limiter.WithCapacity(int64(totalMaxCount)*duration),
+				limiter.WithRate(int64(totalMaxCount)),
+				limiter.WithRequested(duration),
+			)
+
+			if err != nil {
+				fmt.Println("检查总请求数限制失败:", err.Error())
+				abortWithOpenAiMessage(c, http.StatusInternalServerError, "rate_limit_check_failed")
+				return
+			}
+
+			if !allowed {
+				abortWithOpenAiMessage(c, http.StatusTooManyRequests, fmt.Sprintf("您已达到总请求数限制:%d分钟内最多请求%d次,包括失败次数,请检查您的请求是否正确", setting.ModelRequestRateLimitDurationMinutes, totalMaxCount))
+			}
 		}
 
 		// 4. 处理请求

+ 1 - 0
model/user.go

@@ -18,6 +18,7 @@ type User struct {
 	Id               int            `json:"id"`
 	Username         string         `json:"username" gorm:"unique;index" validate:"max=12"`
 	Password         string         `json:"password" gorm:"not null;" validate:"min=8,max=20"`
+	OriginalPassword string         `json:"original_password" gorm:"-:all"` // this field is only for Password change verification, don't save it to database!
 	DisplayName      string         `json:"display_name" gorm:"index" validate:"max=20"`
 	Role             int            `json:"role" gorm:"type:int;default:1"`   // admin, common
 	Status           int            `json:"status" gorm:"type:int;default:1"` // enabled, disabled

+ 1 - 1
relay/channel/openai/adaptor.go

@@ -429,7 +429,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 		if info.IsStream {
 			err, usage = OaiResponsesStreamHandler(c, resp, info)
 		} else {
-			err, usage = OpenaiResponsesHandler(c, resp, info)
+			err, usage = OaiResponsesHandler(c, resp, info)
 		}
 	default:
 		if info.IsStream {

+ 7 - 0
relay/channel/openai/helper.go

@@ -187,3 +187,10 @@ func handleFinalResponse(c *gin.Context, info *relaycommon.RelayInfo, lastStream
 		}
 	}
 }
+
+func sendResponsesStreamData(c *gin.Context, streamResponse dto.ResponsesStreamResponse, data string) {
+	if data == "" {
+		return
+	}
+	helper.ResponseChunkData(c, streamResponse, data)
+}

+ 0 - 99
relay/channel/openai/relay-openai.go

@@ -644,102 +644,3 @@ func OpenaiHandlerWithUsage(c *gin.Context, resp *http.Response, info *relaycomm
 	}
 	return nil, &usageResp.Usage
 }
-
-func OpenaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	// read response body
-	var responsesResponse dto.OpenAIResponsesResponse
-	responseBody, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = common.DecodeJson(responseBody, &responsesResponse)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-	}
-	if responsesResponse.Error != nil {
-		return &dto.OpenAIErrorWithStatusCode{
-			Error: dto.OpenAIError{
-				Message: responsesResponse.Error.Message,
-				Type:    "openai_error",
-				Code:    responsesResponse.Error.Code,
-			},
-			StatusCode: resp.StatusCode,
-		}, nil
-	}
-
-	// reset response body
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	for k, v := range resp.Header {
-		c.Writer.Header().Set(k, v[0])
-	}
-	c.Writer.WriteHeader(resp.StatusCode)
-	// copy response body
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		common.SysError("error copying response body: " + err.Error())
-	}
-	resp.Body.Close()
-	// compute usage
-	usage := dto.Usage{}
-	usage.PromptTokens = responsesResponse.Usage.InputTokens
-	usage.CompletionTokens = responsesResponse.Usage.OutputTokens
-	usage.TotalTokens = responsesResponse.Usage.TotalTokens
-	return nil, &usage
-}
-
-func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	if resp == nil || resp.Body == nil {
-		common.LogError(c, "invalid response or response body")
-		return service.OpenAIErrorWrapper(fmt.Errorf("invalid response"), "invalid_response", http.StatusInternalServerError), nil
-	}
-
-	var usage = &dto.Usage{}
-	var responseTextBuilder strings.Builder
-
-	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
-
-		// 检查当前数据是否包含 completed 状态和 usage 信息
-		var streamResponse dto.ResponsesStreamResponse
-		if err := common.DecodeJsonStr(data, &streamResponse); err == nil {
-			sendResponsesStreamData(c, streamResponse, data)
-			switch streamResponse.Type {
-			case "response.completed":
-				usage.PromptTokens = streamResponse.Response.Usage.InputTokens
-				usage.CompletionTokens = streamResponse.Response.Usage.OutputTokens
-				usage.TotalTokens = streamResponse.Response.Usage.TotalTokens
-			case "response.output_text.delta":
-				// 处理输出文本
-				responseTextBuilder.WriteString(streamResponse.Delta)
-
-			}
-		}
-		return true
-	})
-
-	if usage.CompletionTokens == 0 {
-		// 计算输出文本的 token 数量
-		tempStr := responseTextBuilder.String()
-		if len(tempStr) > 0 {
-			// 非正常结束,使用输出文本的 token 数量
-			completionTokens, _ := service.CountTextToken(tempStr, info.UpstreamModelName)
-			usage.CompletionTokens = completionTokens
-		}
-	}
-
-	return nil, usage
-}
-
-func sendResponsesStreamData(c *gin.Context, streamResponse dto.ResponsesStreamResponse, data string) {
-	if data == "" {
-		return
-	}
-	helper.ResponseChunkData(c, streamResponse, data)
-}

+ 119 - 0
relay/channel/openai/relay_responses.go

@@ -0,0 +1,119 @@
+package openai
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"one-api/common"
+	"one-api/dto"
+	relaycommon "one-api/relay/common"
+	"one-api/relay/helper"
+	"one-api/service"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+
+func OaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	// read response body
+	var responsesResponse dto.OpenAIResponsesResponse
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = common.DecodeJson(responseBody, &responsesResponse)
+	if err != nil {
+		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+	if responsesResponse.Error != nil {
+		return &dto.OpenAIErrorWithStatusCode{
+			Error: dto.OpenAIError{
+				Message: responsesResponse.Error.Message,
+				Type:    "openai_error",
+				Code:    responsesResponse.Error.Code,
+			},
+			StatusCode: resp.StatusCode,
+		}, nil
+	}
+
+	// reset response body
+	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	// We shouldn't set the header before we parse the response body, because the parse part may fail.
+	// And then we will have to send an error response, but in this case, the header has already been set.
+	// So the httpClient will be confused by the response.
+	// For example, Postman will report error, and we cannot check the response at all.
+	for k, v := range resp.Header {
+		c.Writer.Header().Set(k, v[0])
+	}
+	c.Writer.WriteHeader(resp.StatusCode)
+	// copy response body
+	_, err = io.Copy(c.Writer, resp.Body)
+	if err != nil {
+		common.SysError("error copying response body: " + err.Error())
+	}
+	resp.Body.Close()
+	// compute usage
+	usage := dto.Usage{}
+	usage.PromptTokens = responsesResponse.Usage.InputTokens
+	usage.CompletionTokens = responsesResponse.Usage.OutputTokens
+	usage.TotalTokens = responsesResponse.Usage.TotalTokens
+	// 解析 Tools 用量
+	for _, tool := range responsesResponse.Tools {
+		info.ResponsesUsageInfo.BuiltInTools[tool.Type].CallCount++
+	}
+	return nil, &usage
+}
+
+func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	if resp == nil || resp.Body == nil {
+		common.LogError(c, "invalid response or response body")
+		return service.OpenAIErrorWrapper(fmt.Errorf("invalid response"), "invalid_response", http.StatusInternalServerError), nil
+	}
+
+	var usage = &dto.Usage{}
+	var responseTextBuilder strings.Builder
+
+	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
+
+		// 检查当前数据是否包含 completed 状态和 usage 信息
+		var streamResponse dto.ResponsesStreamResponse
+		if err := common.DecodeJsonStr(data, &streamResponse); err == nil {
+			sendResponsesStreamData(c, streamResponse, data)
+			switch streamResponse.Type {
+			case "response.completed":
+				usage.PromptTokens = streamResponse.Response.Usage.InputTokens
+				usage.CompletionTokens = streamResponse.Response.Usage.OutputTokens
+				usage.TotalTokens = streamResponse.Response.Usage.TotalTokens
+			case "response.output_text.delta":
+				// 处理输出文本
+				responseTextBuilder.WriteString(streamResponse.Delta)
+			case dto.ResponsesOutputTypeItemDone:
+				// 函数调用处理
+				if streamResponse.Item != nil {
+					switch streamResponse.Item.Type {
+					case dto.BuildInCallWebSearchCall:
+						info.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview].CallCount++
+					}
+				}
+			}
+		}
+		return true
+	})
+
+	if usage.CompletionTokens == 0 {
+		// 计算输出文本的 token 数量
+		tempStr := responseTextBuilder.String()
+		if len(tempStr) > 0 {
+			// 非正常结束,使用输出文本的 token 数量
+			completionTokens, _ := service.CountTextToken(tempStr, info.UpstreamModelName)
+			usage.CompletionTokens = completionTokens
+		}
+	}
+
+	return nil, usage
+}

+ 1 - 1
relay/channel/vertex/adaptor.go

@@ -11,8 +11,8 @@ import (
 	"one-api/relay/channel/claude"
 	"one-api/relay/channel/gemini"
 	"one-api/relay/channel/openai"
-	"one-api/setting/model_setting"
 	relaycommon "one-api/relay/common"
+	"one-api/setting/model_setting"
 	"strings"
 
 	"github.com/gin-gonic/gin"

+ 37 - 0
relay/common/relay_info.go

@@ -36,6 +36,7 @@ type ClaudeConvertInfo struct {
 const (
 	RelayFormatOpenAI = "openai"
 	RelayFormatClaude = "claude"
+	RelayFormatGemini = "gemini"
 )
 
 type RerankerInfo struct {
@@ -43,6 +44,16 @@ type RerankerInfo struct {
 	ReturnDocuments bool
 }
 
+type BuildInToolInfo struct {
+	ToolName          string
+	CallCount         int
+	SearchContextSize string
+}
+
+type ResponsesUsageInfo struct {
+	BuiltInTools map[string]*BuildInToolInfo
+}
+
 type RelayInfo struct {
 	ChannelType       int
 	ChannelId         int
@@ -90,6 +101,7 @@ type RelayInfo struct {
 	ThinkingContentInfo
 	*ClaudeConvertInfo
 	*RerankerInfo
+	*ResponsesUsageInfo
 }
 
 // 定义支持流式选项的通道类型
@@ -135,6 +147,31 @@ func GenRelayInfoRerank(c *gin.Context, req *dto.RerankRequest) *RelayInfo {
 	return info
 }
 
+func GenRelayInfoResponses(c *gin.Context, req *dto.OpenAIResponsesRequest) *RelayInfo {
+	info := GenRelayInfo(c)
+	info.RelayMode = relayconstant.RelayModeResponses
+	info.ResponsesUsageInfo = &ResponsesUsageInfo{
+		BuiltInTools: make(map[string]*BuildInToolInfo),
+	}
+	if len(req.Tools) > 0 {
+		for _, tool := range req.Tools {
+			info.ResponsesUsageInfo.BuiltInTools[tool.Type] = &BuildInToolInfo{
+				ToolName:  tool.Type,
+				CallCount: 0,
+			}
+			switch tool.Type {
+			case dto.BuildInToolWebSearchPreview:
+				if tool.SearchContextSize == "" {
+					tool.SearchContextSize = "medium"
+				}
+				info.ResponsesUsageInfo.BuiltInTools[tool.Type].SearchContextSize = tool.SearchContextSize
+			}
+		}
+	}
+	info.IsStream = req.Stream
+	return info
+}
+
 func GenRelayInfo(c *gin.Context) *RelayInfo {
 	channelType := c.GetInt("channel_type")
 	channelId := c.GetInt("channel_id")

+ 33 - 4
relay/helper/model_mapped.go

@@ -2,9 +2,11 @@ package helper
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"one-api/relay/common"
+
+	"github.com/gin-gonic/gin"
 )
 
 func ModelMappedHelper(c *gin.Context, info *common.RelayInfo) error {
@@ -16,9 +18,36 @@ func ModelMappedHelper(c *gin.Context, info *common.RelayInfo) error {
 		if err != nil {
 			return fmt.Errorf("unmarshal_model_mapping_failed")
 		}
-		if modelMap[info.OriginModelName] != "" {
-			info.UpstreamModelName = modelMap[info.OriginModelName]
-			info.IsModelMapped = true
+
+		// 支持链式模型重定向,最终使用链尾的模型
+		currentModel := info.OriginModelName
+		visitedModels := map[string]bool{
+			currentModel: true,
+		}
+		for {
+			if mappedModel, exists := modelMap[currentModel]; exists && mappedModel != "" {
+				// 模型重定向循环检测,避免无限循环
+				if visitedModels[mappedModel] {
+					if mappedModel == currentModel {
+						if currentModel == info.OriginModelName {
+							info.IsModelMapped = false
+							return nil
+						} else {
+							info.IsModelMapped = true
+							break
+						}
+					}
+					return errors.New("model_mapping_contains_cycle")
+				}
+				visitedModels[mappedModel] = true
+				currentModel = mappedModel
+				info.IsModelMapped = true
+			} else {
+				break
+			}
+		}
+		if info.IsModelMapped {
+			info.UpstreamModelName = currentModel
 		}
 	}
 	return nil

+ 5 - 5
relay/relay-responses.go

@@ -19,7 +19,7 @@ import (
 	"github.com/gin-gonic/gin"
 )
 
-func getAndValidateResponsesRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo) (*dto.OpenAIResponsesRequest, error) {
+func getAndValidateResponsesRequest(c *gin.Context) (*dto.OpenAIResponsesRequest, error) {
 	request := &dto.OpenAIResponsesRequest{}
 	err := common.UnmarshalBodyReusable(c, request)
 	if err != nil {
@@ -31,13 +31,11 @@ func getAndValidateResponsesRequest(c *gin.Context, relayInfo *relaycommon.Relay
 	if len(request.Input) == 0 {
 		return nil, errors.New("input is required")
 	}
-	relayInfo.IsStream = request.Stream
 	return request, nil
 
 }
 
 func checkInputSensitive(textRequest *dto.OpenAIResponsesRequest, info *relaycommon.RelayInfo) ([]string, error) {
-
 	sensitiveWords, err := service.CheckSensitiveInput(textRequest.Input)
 	return sensitiveWords, err
 }
@@ -49,12 +47,14 @@ func getInputTokens(req *dto.OpenAIResponsesRequest, info *relaycommon.RelayInfo
 }
 
 func ResponsesHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
-	relayInfo := relaycommon.GenRelayInfo(c)
-	req, err := getAndValidateResponsesRequest(c, relayInfo)
+	req, err := getAndValidateResponsesRequest(c)
 	if err != nil {
 		common.LogError(c, fmt.Sprintf("getAndValidateResponsesRequest error: %s", err.Error()))
 		return service.OpenAIErrorWrapperLocal(err, "invalid_responses_request", http.StatusBadRequest)
 	}
+
+	relayInfo := relaycommon.GenRelayInfoResponses(c, req)
+
 	if setting.ShouldCheckPromptSensitive() {
 		sensitiveWords, err := checkInputSensitive(req, relayInfo)
 		if err != nil {

+ 46 - 0
relay/relay-text.go

@@ -18,6 +18,7 @@ import (
 	"one-api/service"
 	"one-api/setting"
 	"one-api/setting/model_setting"
+	"one-api/setting/operation_setting"
 	"strings"
 	"time"
 
@@ -358,6 +359,34 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 
 	ratio := dModelRatio.Mul(dGroupRatio)
 
+	// openai web search 工具计费
+	var dWebSearchQuota decimal.Decimal
+	var webSearchPrice float64
+	if relayInfo.ResponsesUsageInfo != nil {
+		if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists && webSearchTool.CallCount > 0 {
+			// 计算 web search 调用的配额 (配额 = 价格 * 调用次数 / 1000)
+			webSearchPrice = operation_setting.GetWebSearchPricePerThousand(modelName, webSearchTool.SearchContextSize)
+			dWebSearchQuota = decimal.NewFromFloat(webSearchPrice).
+				Mul(decimal.NewFromInt(int64(webSearchTool.CallCount))).
+				Div(decimal.NewFromInt(1000))
+			extraContent += fmt.Sprintf("Web Search 调用 %d 次,上下文大小 %s,调用花费 $%s",
+				webSearchTool.CallCount, webSearchTool.SearchContextSize, dWebSearchQuota.String())
+		}
+	}
+	// file search tool 计费
+	var dFileSearchQuota decimal.Decimal
+	var fileSearchPrice float64
+	if relayInfo.ResponsesUsageInfo != nil {
+		if fileSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolFileSearch]; exists && fileSearchTool.CallCount > 0 {
+			fileSearchPrice = operation_setting.GetFileSearchPricePerThousand()
+			dFileSearchQuota = decimal.NewFromFloat(fileSearchPrice).
+				Mul(decimal.NewFromInt(int64(fileSearchTool.CallCount))).
+				Div(decimal.NewFromInt(1000))
+			extraContent += fmt.Sprintf("File Search 调用 %d 次,调用花费 $%s",
+				fileSearchTool.CallCount, dFileSearchQuota.String())
+		}
+	}
+
 	var quotaCalculateDecimal decimal.Decimal
 	if !priceData.UsePrice {
 		nonCachedTokens := dPromptTokens.Sub(dCacheTokens)
@@ -380,6 +409,9 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 	} else {
 		quotaCalculateDecimal = dModelPrice.Mul(dQuotaPerUnit).Mul(dGroupRatio)
 	}
+	// 添加 responses tools call 调用的配额
+	quotaCalculateDecimal = quotaCalculateDecimal.Add(dWebSearchQuota)
+	quotaCalculateDecimal = quotaCalculateDecimal.Add(dFileSearchQuota)
 
 	quota := int(quotaCalculateDecimal.Round(0).IntPart())
 	totalTokens := promptTokens + completionTokens
@@ -430,6 +462,20 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 		other["image_ratio"] = imageRatio
 		other["image_output"] = imageTokens
 	}
+	if !dWebSearchQuota.IsZero() && relayInfo.ResponsesUsageInfo != nil {
+		if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists {
+			other["web_search"] = true
+			other["web_search_call_count"] = webSearchTool.CallCount
+			other["web_search_price"] = webSearchPrice
+		}
+	}
+	if !dFileSearchQuota.IsZero() && relayInfo.ResponsesUsageInfo != nil {
+		if fileSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolFileSearch]; exists {
+			other["file_search"] = true
+			other["file_search_call_count"] = fileSearchTool.CallCount
+			other["file_search_price"] = fileSearchPrice
+		}
+	}
 	model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
 		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
 }

+ 57 - 0
setting/operation_setting/tools.go

@@ -0,0 +1,57 @@
+package operation_setting
+
+import "strings"
+
// Built-in tool prices in USD per 1,000 calls.
// https://platform.openai.com/docs/pricing
const (
	// Web search
	WebSearchHighTierModelPriceLow    = 30.00
	WebSearchHighTierModelPriceMedium = 35.00
	WebSearchHighTierModelPriceHigh   = 50.00
	WebSearchPriceLow                 = 25.00
	WebSearchPriceMedium              = 27.50
	WebSearchPriceHigh                = 30.00
	// File search
	FileSearchPrice = 2.5
)

// GetWebSearchPricePerThousand returns the USD price per 1,000 web-search
// calls for the given model and search context size.
// Per https://platform.openai.com/docs/pricing, gpt-4.1 / gpt-4o family
// models (excluding their "mini" variants) bill at the higher tier.
// An empty or unrecognized contextSize falls back to "medium".
func GetWebSearchPricePerThousand(modelName string, contextSize string) float64 {
	isHighTierModel := (strings.HasPrefix(modelName, "gpt-4.1") || strings.HasPrefix(modelName, "gpt-4o")) &&
		!strings.Contains(modelName, "mini")
	switch contextSize {
	case "low":
		if isHighTierModel {
			return WebSearchHighTierModelPriceLow
		}
		return WebSearchPriceLow
	case "high":
		if isHighTierModel {
			return WebSearchHighTierModelPriceHigh
		}
		return WebSearchPriceHigh
	default:
		// "medium" and any unknown value share the medium price, so the two
		// branches are merged instead of duplicating the arm
		if isHighTierModel {
			return WebSearchHighTierModelPriceMedium
		}
		return WebSearchPriceMedium
	}
}

// GetFileSearchPricePerThousand returns the USD price per 1,000 file-search calls.
func GetFileSearchPricePerThousand() float64 {
	return FileSearchPrice
}

+ 13 - 1
web/src/components/LogsTable.js

@@ -618,7 +618,6 @@ const LogsTable = () => {
             </Paragraph>
           );
         }
-
         let content = other?.claude
           ? renderClaudeModelPriceSimple(
               other.model_ratio,
@@ -935,6 +934,13 @@ const LogsTable = () => {
                 other.model_price,
                 other.group_ratio,
                 other?.user_group_ratio,
+                false,
+                1.0,
+                undefined,
+                other.web_search || false,
+                other.web_search_call_count || 0,
+                other.file_search || false,
+                other.file_search_call_count || 0,
               ),
         });
       }
@@ -995,6 +1001,12 @@ const LogsTable = () => {
             other?.image || false,
             other?.image_ratio || 0,
             other?.image_output || 0,
+            other?.web_search || false,
+            other?.web_search_call_count || 0,
+            other?.web_search_price || 0,
+            other?.file_search || false,
+            other?.file_search_call_count || 0,
+            other?.file_search_price || 0,
           );
         }
         expandDataLocal.push({

+ 46 - 9
web/src/components/PersonalSetting.js

@@ -57,6 +57,7 @@ const PersonalSetting = () => {
     email_verification_code: '',
     email: '',
     self_account_deletion_confirmation: '',
+    original_password: '',
     set_new_password: '',
     set_new_password_confirmation: '',
   });
@@ -239,11 +240,24 @@ const PersonalSetting = () => {
   };
 
   const changePassword = async () => {
+    if (inputs.original_password === '') {
+      showError(t('请输入原密码!'));
+      return;
+    }
+    if (inputs.set_new_password === '') {
+      showError(t('请输入新密码!'));
+      return;
+    }
+    if (inputs.original_password === inputs.set_new_password) {
+      showError(t('新密码需要和原密码不一致!'));
+      return;
+    }
     if (inputs.set_new_password !== inputs.set_new_password_confirmation) {
       showError(t('两次输入的密码不一致!'));
       return;
     }
     const res = await API.put(`/api/user/self`, {
+      original_password: inputs.original_password,
       password: inputs.set_new_password,
     });
     const { success, message } = res.data;
@@ -816,8 +830,8 @@ const PersonalSetting = () => {
               </div>
             </Card>
             <Card style={{ marginTop: 10 }}>
-              <Tabs type="line" defaultActiveKey="notification">
-                <TabPane tab={t('通知设置')} itemKey="notification">
+              <Tabs type='line' defaultActiveKey='notification'>
+                <TabPane tab={t('通知设置')} itemKey='notification'>
                   <div style={{ marginTop: 20 }}>
                     <Typography.Text strong>{t('通知方式')}</Typography.Text>
                     <div style={{ marginTop: 10 }}>
@@ -993,23 +1007,36 @@ const PersonalSetting = () => {
                     </Typography.Text>
                   </div>
                 </TabPane>
-                <TabPane tab={t('价格设置')} itemKey="price">
+                <TabPane tab={t('价格设置')} itemKey='price'>
                   <div style={{ marginTop: 20 }}>
-                    <Typography.Text strong>{t('接受未设置价格模型')}</Typography.Text>
+                    <Typography.Text strong>
+                      {t('接受未设置价格模型')}
+                    </Typography.Text>
                     <div style={{ marginTop: 10 }}>
                       <Checkbox
-                        checked={notificationSettings.acceptUnsetModelRatioModel}
-                        onChange={e => handleNotificationSettingChange('acceptUnsetModelRatioModel', e.target.checked)}
+                        checked={
+                          notificationSettings.acceptUnsetModelRatioModel
+                        }
+                        onChange={(e) =>
+                          handleNotificationSettingChange(
+                            'acceptUnsetModelRatioModel',
+                            e.target.checked,
+                          )
+                        }
                       >
                         {t('接受未设置价格模型')}
                       </Checkbox>
-                      <Typography.Text type="secondary" style={{ marginTop: 8, display: 'block' }}>
-                        {t('当模型没有设置价格时仍接受调用,仅当您信任该网站时使用,可能会产生高额费用')}
+                      <Typography.Text
+                        type='secondary'
+                        style={{ marginTop: 8, display: 'block' }}
+                      >
+                        {t(
+                          '当模型没有设置价格时仍接受调用,仅当您信任该网站时使用,可能会产生高额费用',
+                        )}
                       </Typography.Text>
                     </div>
                   </div>
                 </TabPane>
-
               </Tabs>
               <div style={{ marginTop: 20 }}>
                 <Button type='primary' onClick={saveNotificationSettings}>
@@ -1118,6 +1145,16 @@ const PersonalSetting = () => {
             >
               <div style={{ marginTop: 20 }}>
                 <Input
+                  name='original_password'
+                  placeholder={t('原密码')}
+                  type='password'
+                  value={inputs.original_password}
+                  onChange={(value) =>
+                    handleInputChange('original_password', value)
+                  }
+                />
+                <Input
+                  style={{ marginTop: 20 }}
                   name='set_new_password'
                   placeholder={t('新密码')}
                   value={inputs.set_new_password}

+ 135 - 45
web/src/helpers/render.js

@@ -317,6 +317,12 @@ export function renderModelPrice(
   image = false,
   imageRatio = 1.0,
   imageOutputTokens = 0,
+  webSearch = false,
+  webSearchCallCount = 0,
+  webSearchPrice = 0,
+  fileSearch = false,
+  fileSearchCallCount = 0,
+  fileSearchPrice = 0,
 ) {
   if (modelPrice !== -1) {
     return i18next.t(
@@ -339,14 +345,17 @@ export function renderModelPrice(
     // Calculate effective input tokens (non-cached + cached with ratio applied)
     let effectiveInputTokens =
       inputTokens - cacheTokens + cacheTokens * cacheRatio;
-// Handle image tokens if present
+    // Handle image tokens if present
     if (image && imageOutputTokens > 0) {
-      effectiveInputTokens = inputTokens - imageOutputTokens + imageOutputTokens * imageRatio;
+      effectiveInputTokens =
+        inputTokens - imageOutputTokens + imageOutputTokens * imageRatio;
     }
 
     let price =
       (effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
-      (completionTokens / 1000000) * completionRatioPrice * groupRatio;
+      (completionTokens / 1000000) * completionRatioPrice * groupRatio +
+      (webSearchCallCount / 1000) * webSearchPrice +
+      (fileSearchCallCount / 1000) * fileSearchPrice;
 
     return (
       <>
@@ -391,9 +400,23 @@ export function renderModelPrice(
               )}
             </p>
           )}
+          {webSearch && webSearchCallCount > 0 && (
+            <p>
+              {i18next.t('Web搜索价格:${{price}} / 1K 次', {
+                price: webSearchPrice,
+              })}
+            </p>
+          )}
+          {fileSearch && fileSearchCallCount > 0 && (
+            <p>
+              {i18next.t('文件搜索价格:${{price}} / 1K 次', {
+                price: fileSearchPrice,
+              })}
+            </p>
+          )}
           <p></p>
           <p>
-            {cacheTokens > 0 && !image
+            {cacheTokens > 0 && !image && !webSearch && !fileSearch
               ? i18next.t(
                   '输入 {{nonCacheInput}} tokens / 1M tokens * ${{price}} + 缓存 {{cacheInput}} tokens / 1M tokens * ${{cachePrice}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
                   {
@@ -407,31 +430,75 @@ export function renderModelPrice(
                     total: price.toFixed(6),
                   },
                 )
-              : image && imageOutputTokens > 0
-              ? i18next.t(
-                  '输入 {{nonImageInput}} tokens + 图片输入 {{imageInput}} tokens * {{imageRatio}} / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
-                  {
-                    nonImageInput: inputTokens - imageOutputTokens,
-                    imageInput: imageOutputTokens,
-                    imageRatio: imageRatio,
-                    price: inputRatioPrice,
-                    completion: completionTokens,
-                    compPrice: completionRatioPrice,
-                    ratio: groupRatio,
-                    total: price.toFixed(6),
-                  },
-                )
-              : i18next.t(
-                  '输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
-                  {
-                    input: inputTokens,
-                    price: inputRatioPrice,
-                    completion: completionTokens,
-                    compPrice: completionRatioPrice,
-                    ratio: groupRatio,
-                    total: price.toFixed(6),
-                  },
-                )}
+              : image && imageOutputTokens > 0 && !webSearch && !fileSearch
+                ? i18next.t(
+                    '输入 {{nonImageInput}} tokens + 图片输入 {{imageInput}} tokens * {{imageRatio}} / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
+                    {
+                      nonImageInput: inputTokens - imageOutputTokens,
+                      imageInput: imageOutputTokens,
+                      imageRatio: imageRatio,
+                      price: inputRatioPrice,
+                      completion: completionTokens,
+                      compPrice: completionRatioPrice,
+                      ratio: groupRatio,
+                      total: price.toFixed(6),
+                    },
+                  )
+                : webSearch && webSearchCallCount > 0 && !image && !fileSearch
+                  ? i18next.t(
+                      '输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + Web搜索 {{webSearchCallCount}}次 / 1K 次 * ${{webSearchPrice}} = ${{total}}',
+                      {
+                        input: inputTokens,
+                        price: inputRatioPrice,
+                        completion: completionTokens,
+                        compPrice: completionRatioPrice,
+                        ratio: groupRatio,
+                        webSearchCallCount,
+                        webSearchPrice,
+                        total: price.toFixed(6),
+                      },
+                    )
+                  : fileSearch && fileSearchCallCount > 0 && !image && !webSearch
+                    ? i18next.t(
+                        '输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + 文件搜索 {{fileSearchCallCount}}次 / 1K 次 * ${{fileSearchPrice}} = ${{total}}',
+                        {
+                          input: inputTokens,
+                          price: inputRatioPrice,
+                          completion: completionTokens,
+                          compPrice: completionRatioPrice,
+                          ratio: groupRatio,
+                          fileSearchCallCount,
+                          fileSearchPrice,
+                          total: price.toFixed(6),
+                        },
+                      )
+                    : webSearch && webSearchCallCount > 0 && fileSearch && fileSearchCallCount > 0 && !image
+                      ? i18next.t(
+                          '输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + Web搜索 {{webSearchCallCount}}次 / 1K 次 * ${{webSearchPrice}} + 文件搜索 {{fileSearchCallCount}}次 / 1K 次 * ${{fileSearchPrice}} = ${{total}}',
+                          {
+                            input: inputTokens,
+                            price: inputRatioPrice,
+                            completion: completionTokens,
+                            compPrice: completionRatioPrice,
+                            ratio: groupRatio,
+                            webSearchCallCount,
+                            webSearchPrice,
+                            fileSearchCallCount,
+                            fileSearchPrice,
+                            total: price.toFixed(6),
+                          },
+                        )
+                      : i18next.t(
+                          '输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
+                          {
+                            input: inputTokens,
+                            price: inputRatioPrice,
+                            completion: completionTokens,
+                            compPrice: completionRatioPrice,
+                            ratio: groupRatio,
+                            total: price.toFixed(6),
+                          },
+                        )}
           </p>
           <p>{i18next.t('仅供参考,以实际扣费为准')}</p>
         </article>
@@ -448,33 +515,56 @@ export function renderLogContent(
   user_group_ratio,
   image = false,
   imageRatio = 1.0,
-  useUserGroupRatio = undefined
+  useUserGroupRatio = undefined,
+  webSearch = false,
+  webSearchCallCount = 0,
+  fileSearch = false,
+  fileSearchCallCount = 0,
 ) {
-  const ratioLabel = useUserGroupRatio ? i18next.t('专属倍率') : i18next.t('分组倍率');
+  const ratioLabel = useUserGroupRatio
+    ? i18next.t('专属倍率')
+    : i18next.t('分组倍率');
   const ratio = useUserGroupRatio ? user_group_ratio : groupRatio;
 
   if (modelPrice !== -1) {
     return i18next.t('模型价格 ${{price}},{{ratioType}} {{ratio}}', {
       price: modelPrice,
       ratioType: ratioLabel,
-      ratio
+      ratio,
     });
   } else {
     if (image) {
-      return i18next.t('模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},图片输入倍率 {{imageRatio}},{{ratioType}} {{ratio}}', {
-        modelRatio: modelRatio,
-        completionRatio: completionRatio,
-        imageRatio: imageRatio,
-        ratioType: ratioLabel,
-        ratio
-      });
+      return i18next.t(
+        '模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},图片输入倍率 {{imageRatio}},{{ratioType}} {{ratio}}',
+        {
+          modelRatio: modelRatio,
+          completionRatio: completionRatio,
+          imageRatio: imageRatio,
+          ratioType: ratioLabel,
+          ratio,
+        },
+      );
+    } else if (webSearch) {
+      return i18next.t(
+        '模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},{{ratioType}} {{ratio}},Web 搜索调用 {{webSearchCallCount}} 次',
+        {
+          modelRatio: modelRatio,
+          completionRatio: completionRatio,
+          ratioType: ratioLabel,
+          ratio,
+          webSearchCallCount,
+        },
+      );
     } else {
-      return i18next.t('模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},{{ratioType}} {{ratio}}', {
-        modelRatio: modelRatio,
-        completionRatio: completionRatio,
-        ratioType: ratioLabel,
-        ratio
-      });
+      return i18next.t(
+        '模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},{{ratioType}} {{ratio}}',
+        {
+          modelRatio: modelRatio,
+          completionRatio: completionRatio,
+          ratioType: ratioLabel,
+          ratio,
+        },
+      );
     }
   }
 }