token_counter.go 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612
  1. package service
  2. import (
  3. "encoding/json"
  4. "errors"
  5. "fmt"
  6. "image"
  7. "log"
  8. "math"
  9. "one-api/common"
  10. "one-api/constant"
  11. "one-api/dto"
  12. relaycommon "one-api/relay/common"
  13. "one-api/types"
  14. "strings"
  15. "sync"
  16. "unicode/utf8"
  17. "github.com/gin-gonic/gin"
  18. "github.com/tiktoken-go/tokenizer"
  19. "github.com/tiktoken-go/tokenizer/codec"
  20. )
// defaultTokenEncoder is the fallback encoder (cl100k_base); it is set once in
// InitTokenEncoders and never reassigned afterwards.
var defaultTokenEncoder tokenizer.Codec

// tokenEncoderMap caches token encoders keyed by model name.
var tokenEncoderMap = make(map[string]tokenizer.Codec)

// tokenEncoderMutex protects tokenEncoderMap for concurrent access.
var tokenEncoderMutex sync.RWMutex
// InitTokenEncoders initializes the default cl100k_base token encoder.
// It must be called once at startup before any token counting takes place,
// since getTokenEncoder falls back to defaultTokenEncoder for unknown models.
func InitTokenEncoders() {
	common.SysLog("initializing token encoders")
	defaultTokenEncoder = codec.NewCl100kBase()
	common.SysLog("token encoders initialized")
}
  32. func getTokenEncoder(model string) tokenizer.Codec {
  33. // First, try to get the encoder from cache with read lock
  34. tokenEncoderMutex.RLock()
  35. if encoder, exists := tokenEncoderMap[model]; exists {
  36. tokenEncoderMutex.RUnlock()
  37. return encoder
  38. }
  39. tokenEncoderMutex.RUnlock()
  40. // If not in cache, create new encoder with write lock
  41. tokenEncoderMutex.Lock()
  42. defer tokenEncoderMutex.Unlock()
  43. // Double-check if another goroutine already created the encoder
  44. if encoder, exists := tokenEncoderMap[model]; exists {
  45. return encoder
  46. }
  47. // Create new encoder
  48. modelCodec, err := tokenizer.ForModel(tokenizer.Model(model))
  49. if err != nil {
  50. // Cache the default encoder for this model to avoid repeated failures
  51. tokenEncoderMap[model] = defaultTokenEncoder
  52. return defaultTokenEncoder
  53. }
  54. // Cache the new encoder
  55. tokenEncoderMap[model] = modelCodec
  56. return modelCodec
  57. }
  58. func getTokenNum(tokenEncoder tokenizer.Codec, text string) int {
  59. if text == "" {
  60. return 0
  61. }
  62. tkm, _ := tokenEncoder.Count(text)
  63. return tkm
  64. }
  65. func getImageToken(fileMeta *types.FileMeta, model string, stream bool) (int, error) {
  66. if fileMeta == nil {
  67. return 0, fmt.Errorf("image_url_is_nil")
  68. }
  69. // Defaults for 4o/4.1/4.5 family unless overridden below
  70. baseTokens := 85
  71. tileTokens := 170
  72. // Model classification
  73. lowerModel := strings.ToLower(model)
  74. // Special cases from existing behavior
  75. if strings.HasPrefix(lowerModel, "glm-4") {
  76. return 1047, nil
  77. }
  78. // Patch-based models (32x32 patches, capped at 1536, with multiplier)
  79. isPatchBased := false
  80. multiplier := 1.0
  81. switch {
  82. case strings.Contains(lowerModel, "gpt-4.1-mini"):
  83. isPatchBased = true
  84. multiplier = 1.62
  85. case strings.Contains(lowerModel, "gpt-4.1-nano"):
  86. isPatchBased = true
  87. multiplier = 2.46
  88. case strings.HasPrefix(lowerModel, "o4-mini"):
  89. isPatchBased = true
  90. multiplier = 1.72
  91. case strings.HasPrefix(lowerModel, "gpt-5-mini"):
  92. isPatchBased = true
  93. multiplier = 1.62
  94. case strings.HasPrefix(lowerModel, "gpt-5-nano"):
  95. isPatchBased = true
  96. multiplier = 2.46
  97. }
  98. // Tile-based model tokens and bases per doc
  99. if !isPatchBased {
  100. if strings.HasPrefix(lowerModel, "gpt-4o-mini") {
  101. baseTokens = 2833
  102. tileTokens = 5667
  103. } else if strings.HasPrefix(lowerModel, "gpt-5-chat-latest") || (strings.HasPrefix(lowerModel, "gpt-5") && !strings.Contains(lowerModel, "mini") && !strings.Contains(lowerModel, "nano")) {
  104. baseTokens = 70
  105. tileTokens = 140
  106. } else if strings.HasPrefix(lowerModel, "o1") || strings.HasPrefix(lowerModel, "o3") || strings.HasPrefix(lowerModel, "o1-pro") {
  107. baseTokens = 75
  108. tileTokens = 150
  109. } else if strings.Contains(lowerModel, "computer-use-preview") {
  110. baseTokens = 65
  111. tileTokens = 129
  112. } else if strings.Contains(lowerModel, "4.1") || strings.Contains(lowerModel, "4o") || strings.Contains(lowerModel, "4.5") {
  113. baseTokens = 85
  114. tileTokens = 170
  115. }
  116. }
  117. // Respect existing feature flags/short-circuits
  118. if fileMeta.Detail == "low" && !isPatchBased {
  119. return baseTokens, nil
  120. }
  121. if !constant.GetMediaTokenNotStream && !stream {
  122. return 3 * baseTokens, nil
  123. }
  124. // Normalize detail
  125. if fileMeta.Detail == "auto" || fileMeta.Detail == "" {
  126. fileMeta.Detail = "high"
  127. }
  128. // Whether to count image tokens at all
  129. if !constant.GetMediaToken {
  130. return 3 * baseTokens, nil
  131. }
  132. // Decode image to get dimensions
  133. var config image.Config
  134. var err error
  135. var format string
  136. var b64str string
  137. if strings.HasPrefix(fileMeta.Data, "http") {
  138. config, format, err = DecodeUrlImageData(fileMeta.Data)
  139. } else {
  140. common.SysLog(fmt.Sprintf("decoding image"))
  141. config, format, b64str, err = DecodeBase64ImageData(fileMeta.Data)
  142. }
  143. if err != nil {
  144. return 0, err
  145. }
  146. fileMeta.MimeType = format
  147. if config.Width == 0 || config.Height == 0 {
  148. // not an image
  149. if format != "" && b64str != "" {
  150. // file type
  151. return 3 * baseTokens, nil
  152. }
  153. return 0, errors.New(fmt.Sprintf("fail to decode base64 config: %s", fileMeta.Data))
  154. }
  155. width := config.Width
  156. height := config.Height
  157. log.Printf("format: %s, width: %d, height: %d", format, width, height)
  158. if isPatchBased {
  159. // 32x32 patch-based calculation with 1536 cap and model multiplier
  160. ceilDiv := func(a, b int) int { return (a + b - 1) / b }
  161. rawPatchesW := ceilDiv(width, 32)
  162. rawPatchesH := ceilDiv(height, 32)
  163. rawPatches := rawPatchesW * rawPatchesH
  164. if rawPatches > 1536 {
  165. // scale down
  166. area := float64(width * height)
  167. r := math.Sqrt(float64(32*32*1536) / area)
  168. wScaled := float64(width) * r
  169. hScaled := float64(height) * r
  170. // adjust to fit whole number of patches after scaling
  171. adjW := math.Floor(wScaled/32.0) / (wScaled / 32.0)
  172. adjH := math.Floor(hScaled/32.0) / (hScaled / 32.0)
  173. adj := math.Min(adjW, adjH)
  174. if !math.IsNaN(adj) && adj > 0 {
  175. r = r * adj
  176. }
  177. wScaled = float64(width) * r
  178. hScaled = float64(height) * r
  179. patchesW := math.Ceil(wScaled / 32.0)
  180. patchesH := math.Ceil(hScaled / 32.0)
  181. imageTokens := int(patchesW * patchesH)
  182. if imageTokens > 1536 {
  183. imageTokens = 1536
  184. }
  185. return int(math.Round(float64(imageTokens) * multiplier)), nil
  186. }
  187. // below cap
  188. imageTokens := rawPatches
  189. return int(math.Round(float64(imageTokens) * multiplier)), nil
  190. }
  191. // Tile-based calculation for 4o/4.1/4.5/o1/o3/etc.
  192. // Step 1: fit within 2048x2048 square
  193. maxSide := math.Max(float64(width), float64(height))
  194. fitScale := 1.0
  195. if maxSide > 2048 {
  196. fitScale = maxSide / 2048.0
  197. }
  198. fitW := int(math.Round(float64(width) / fitScale))
  199. fitH := int(math.Round(float64(height) / fitScale))
  200. // Step 2: scale so that shortest side is exactly 768
  201. minSide := math.Min(float64(fitW), float64(fitH))
  202. if minSide == 0 {
  203. return baseTokens, nil
  204. }
  205. shortScale := 768.0 / minSide
  206. finalW := int(math.Round(float64(fitW) * shortScale))
  207. finalH := int(math.Round(float64(fitH) * shortScale))
  208. // Count 512px tiles
  209. tilesW := (finalW + 512 - 1) / 512
  210. tilesH := (finalH + 512 - 1) / 512
  211. tiles := tilesW * tilesH
  212. if common.DebugEnabled {
  213. log.Printf("scaled to: %dx%d, tiles: %d", finalW, finalH, tiles)
  214. }
  215. return tiles*tileTokens + baseTokens, nil
  216. }
  217. func CountRequestToken(c *gin.Context, meta *types.TokenCountMeta, info *relaycommon.RelayInfo) (int, error) {
  218. if meta == nil {
  219. return 0, errors.New("token count meta is nil")
  220. }
  221. if info.RelayFormat == types.RelayFormatOpenAIRealtime {
  222. return 0, nil
  223. }
  224. model := common.GetContextKeyString(c, constant.ContextKeyOriginalModel)
  225. tkm := 0
  226. if meta.TokenType == types.TokenTypeTextNumber {
  227. tkm += utf8.RuneCountInString(meta.CombineText)
  228. } else {
  229. tkm += CountTextToken(meta.CombineText, model)
  230. }
  231. if info.RelayFormat == types.RelayFormatOpenAI {
  232. tkm += meta.ToolsCount * 8
  233. tkm += meta.MessagesCount * 3 // 每条消息的格式化token数量
  234. tkm += meta.NameCount * 3
  235. tkm += 3
  236. }
  237. for _, file := range meta.Files {
  238. switch file.FileType {
  239. case types.FileTypeImage:
  240. if info.RelayFormat == types.RelayFormatGemini {
  241. tkm += 240
  242. } else {
  243. token, err := getImageToken(file, model, info.IsStream)
  244. if err != nil {
  245. return 0, fmt.Errorf("error counting image token: %v", err)
  246. }
  247. tkm += token
  248. }
  249. case types.FileTypeAudio:
  250. tkm += 100
  251. case types.FileTypeVideo:
  252. tkm += 5000
  253. case types.FileTypeFile:
  254. tkm += 5000
  255. }
  256. }
  257. common.SetContextKey(c, constant.ContextKeyPromptTokens, tkm)
  258. return tkm, nil
  259. }
  260. //func CountTokenChatRequest(info *relaycommon.RelayInfo, request dto.GeneralOpenAIRequest) (int, error) {
  261. // tkm := 0
  262. // msgTokens, err := CountTokenMessages(info, request.Messages, request.Model, request.Stream)
  263. // if err != nil {
  264. // return 0, err
  265. // }
  266. // tkm += msgTokens
  267. // if request.Tools != nil {
  268. // openaiTools := request.Tools
  269. // countStr := ""
  270. // for _, tool := range openaiTools {
  271. // countStr = tool.Function.Name
  272. // if tool.Function.Description != "" {
  273. // countStr += tool.Function.Description
  274. // }
  275. // if tool.Function.Parameters != nil {
  276. // countStr += fmt.Sprintf("%v", tool.Function.Parameters)
  277. // }
  278. // }
  279. // toolTokens := CountTokenInput(countStr, request.Model)
  280. // tkm += 8
  281. // tkm += toolTokens
  282. // }
  283. //
  284. // return tkm, nil
  285. //}
  286. func CountTokenClaudeRequest(request dto.ClaudeRequest, model string) (int, error) {
  287. tkm := 0
  288. // Count tokens in messages
  289. msgTokens, err := CountTokenClaudeMessages(request.Messages, model, request.Stream)
  290. if err != nil {
  291. return 0, err
  292. }
  293. tkm += msgTokens
  294. // Count tokens in system message
  295. if request.System != "" {
  296. systemTokens := CountTokenInput(request.System, model)
  297. tkm += systemTokens
  298. }
  299. if request.Tools != nil {
  300. // check is array
  301. if tools, ok := request.Tools.([]any); ok {
  302. if len(tools) > 0 {
  303. parsedTools, err1 := common.Any2Type[[]dto.Tool](request.Tools)
  304. if err1 != nil {
  305. return 0, fmt.Errorf("tools: Input should be a valid list: %v", err)
  306. }
  307. toolTokens, err2 := CountTokenClaudeTools(parsedTools, model)
  308. if err2 != nil {
  309. return 0, fmt.Errorf("tools: %v", err)
  310. }
  311. tkm += toolTokens
  312. }
  313. } else {
  314. return 0, errors.New("tools: Input should be a valid list")
  315. }
  316. }
  317. return tkm, nil
  318. }
  319. func CountTokenClaudeMessages(messages []dto.ClaudeMessage, model string, stream bool) (int, error) {
  320. tokenEncoder := getTokenEncoder(model)
  321. tokenNum := 0
  322. for _, message := range messages {
  323. // Count tokens for role
  324. tokenNum += getTokenNum(tokenEncoder, message.Role)
  325. if message.IsStringContent() {
  326. tokenNum += getTokenNum(tokenEncoder, message.GetStringContent())
  327. } else {
  328. content, err := message.ParseContent()
  329. if err != nil {
  330. return 0, err
  331. }
  332. for _, mediaMessage := range content {
  333. switch mediaMessage.Type {
  334. case "text":
  335. tokenNum += getTokenNum(tokenEncoder, mediaMessage.GetText())
  336. case "image":
  337. //imageTokenNum, err := getClaudeImageToken(mediaMsg.Source, model, stream)
  338. //if err != nil {
  339. // return 0, err
  340. //}
  341. tokenNum += 1000
  342. case "tool_use":
  343. if mediaMessage.Input != nil {
  344. tokenNum += getTokenNum(tokenEncoder, mediaMessage.Name)
  345. inputJSON, _ := json.Marshal(mediaMessage.Input)
  346. tokenNum += getTokenNum(tokenEncoder, string(inputJSON))
  347. }
  348. case "tool_result":
  349. if mediaMessage.Content != nil {
  350. contentJSON, _ := json.Marshal(mediaMessage.Content)
  351. tokenNum += getTokenNum(tokenEncoder, string(contentJSON))
  352. }
  353. }
  354. }
  355. }
  356. }
  357. // Add a constant for message formatting (this may need adjustment based on Claude's exact formatting)
  358. tokenNum += len(messages) * 2 // Assuming 2 tokens per message for formatting
  359. return tokenNum, nil
  360. }
  361. func CountTokenClaudeTools(tools []dto.Tool, model string) (int, error) {
  362. tokenEncoder := getTokenEncoder(model)
  363. tokenNum := 0
  364. for _, tool := range tools {
  365. tokenNum += getTokenNum(tokenEncoder, tool.Name)
  366. tokenNum += getTokenNum(tokenEncoder, tool.Description)
  367. schemaJSON, err := json.Marshal(tool.InputSchema)
  368. if err != nil {
  369. return 0, errors.New(fmt.Sprintf("marshal_tool_schema_fail: %s", err.Error()))
  370. }
  371. tokenNum += getTokenNum(tokenEncoder, string(schemaJSON))
  372. }
  373. // Add a constant for tool formatting (this may need adjustment based on Claude's exact formatting)
  374. tokenNum += len(tools) * 3 // Assuming 3 tokens per tool for formatting
  375. return tokenNum, nil
  376. }
  377. func CountTokenRealtime(info *relaycommon.RelayInfo, request dto.RealtimeEvent, model string) (int, int, error) {
  378. audioToken := 0
  379. textToken := 0
  380. switch request.Type {
  381. case dto.RealtimeEventTypeSessionUpdate:
  382. if request.Session != nil {
  383. msgTokens := CountTextToken(request.Session.Instructions, model)
  384. textToken += msgTokens
  385. }
  386. case dto.RealtimeEventResponseAudioDelta:
  387. // count audio token
  388. atk, err := CountAudioTokenOutput(request.Delta, info.OutputAudioFormat)
  389. if err != nil {
  390. return 0, 0, fmt.Errorf("error counting audio token: %v", err)
  391. }
  392. audioToken += atk
  393. case dto.RealtimeEventResponseAudioTranscriptionDelta, dto.RealtimeEventResponseFunctionCallArgumentsDelta:
  394. // count text token
  395. tkm := CountTextToken(request.Delta, model)
  396. textToken += tkm
  397. case dto.RealtimeEventInputAudioBufferAppend:
  398. // count audio token
  399. atk, err := CountAudioTokenInput(request.Audio, info.InputAudioFormat)
  400. if err != nil {
  401. return 0, 0, fmt.Errorf("error counting audio token: %v", err)
  402. }
  403. audioToken += atk
  404. case dto.RealtimeEventConversationItemCreated:
  405. if request.Item != nil {
  406. switch request.Item.Type {
  407. case "message":
  408. for _, content := range request.Item.Content {
  409. if content.Type == "input_text" {
  410. tokens := CountTextToken(content.Text, model)
  411. textToken += tokens
  412. }
  413. }
  414. }
  415. }
  416. case dto.RealtimeEventTypeResponseDone:
  417. // count tools token
  418. if !info.IsFirstRequest {
  419. if info.RealtimeTools != nil && len(info.RealtimeTools) > 0 {
  420. for _, tool := range info.RealtimeTools {
  421. toolTokens := CountTokenInput(tool, model)
  422. textToken += 8
  423. textToken += toolTokens
  424. }
  425. }
  426. }
  427. }
  428. return textToken, audioToken, nil
  429. }
  430. //func CountTokenMessages(info *relaycommon.RelayInfo, messages []dto.Message, model string, stream bool) (int, error) {
  431. // //recover when panic
  432. // tokenEncoder := getTokenEncoder(model)
  433. // // Reference:
  434. // // https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
  435. // // https://github.com/pkoukk/tiktoken-go/issues/6
  436. // //
  437. // // Every message follows <|start|>{role/name}\n{content}<|end|>\n
  438. // var tokensPerMessage int
  439. // var tokensPerName int
  440. //
  441. // tokensPerMessage = 3
  442. // tokensPerName = 1
  443. //
  444. // tokenNum := 0
  445. // for _, message := range messages {
  446. // tokenNum += tokensPerMessage
  447. // tokenNum += getTokenNum(tokenEncoder, message.Role)
  448. // if message.Content != nil {
  449. // if message.Name != nil {
  450. // tokenNum += tokensPerName
  451. // tokenNum += getTokenNum(tokenEncoder, *message.Name)
  452. // }
  453. // arrayContent := message.ParseContent()
  454. // for _, m := range arrayContent {
  455. // if m.Type == dto.ContentTypeImageURL {
  456. // imageUrl := m.GetImageMedia()
  457. // imageTokenNum, err := getImageToken(info, imageUrl, model, stream)
  458. // if err != nil {
  459. // return 0, err
  460. // }
  461. // tokenNum += imageTokenNum
  462. // log.Printf("image token num: %d", imageTokenNum)
  463. // } else if m.Type == dto.ContentTypeInputAudio {
  464. // // TODO: 音频token数量计算
  465. // tokenNum += 100
  466. // } else if m.Type == dto.ContentTypeFile {
  467. // tokenNum += 5000
  468. // } else if m.Type == dto.ContentTypeVideoUrl {
  469. // tokenNum += 5000
  470. // } else {
  471. // tokenNum += getTokenNum(tokenEncoder, m.Text)
  472. // }
  473. // }
  474. // }
  475. // }
  476. // tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
  477. // return tokenNum, nil
  478. //}
  479. func CountTokenInput(input any, model string) int {
  480. switch v := input.(type) {
  481. case string:
  482. return CountTextToken(v, model)
  483. case []string:
  484. text := ""
  485. for _, s := range v {
  486. text += s
  487. }
  488. return CountTextToken(text, model)
  489. case []interface{}:
  490. text := ""
  491. for _, item := range v {
  492. text += fmt.Sprintf("%v", item)
  493. }
  494. return CountTextToken(text, model)
  495. }
  496. return CountTokenInput(fmt.Sprintf("%v", input), model)
  497. }
  498. func CountTokenStreamChoices(messages []dto.ChatCompletionsStreamResponseChoice, model string) int {
  499. tokens := 0
  500. for _, message := range messages {
  501. tkm := CountTokenInput(message.Delta.GetContentString(), model)
  502. tokens += tkm
  503. if message.Delta.ToolCalls != nil {
  504. for _, tool := range message.Delta.ToolCalls {
  505. tkm := CountTokenInput(tool.Function.Name, model)
  506. tokens += tkm
  507. tkm = CountTokenInput(tool.Function.Arguments, model)
  508. tokens += tkm
  509. }
  510. }
  511. }
  512. return tokens
  513. }
  514. func CountTTSToken(text string, model string) int {
  515. if strings.HasPrefix(model, "tts") {
  516. return utf8.RuneCountInString(text)
  517. } else {
  518. return CountTextToken(text, model)
  519. }
  520. }
  521. func CountAudioTokenInput(audioBase64 string, audioFormat string) (int, error) {
  522. if audioBase64 == "" {
  523. return 0, nil
  524. }
  525. duration, err := parseAudio(audioBase64, audioFormat)
  526. if err != nil {
  527. return 0, err
  528. }
  529. return int(duration / 60 * 100 / 0.06), nil
  530. }
  531. func CountAudioTokenOutput(audioBase64 string, audioFormat string) (int, error) {
  532. if audioBase64 == "" {
  533. return 0, nil
  534. }
  535. duration, err := parseAudio(audioBase64, audioFormat)
  536. if err != nil {
  537. return 0, err
  538. }
  539. return int(duration / 60 * 200 / 0.24), nil
  540. }
  541. //func CountAudioToken(sec float64, audioType string) {
  542. // if audioType == "input" {
  543. //
  544. // }
  545. //}
// CountTextToken counts the tokens of text using the encoder for model.
// Empty text counts as zero. (The previous comment claimed an error is
// returned for sensitive words, but the signature returns only an int —
// no error path exists here.)
func CountTextToken(text string, model string) int {
	if text == "" {
		return 0
	}
	tokenEncoder := getTokenEncoder(model)
	return getTokenNum(tokenEncoder, text)
}