inference.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997
  1. import os
  2. import queue
  3. import re
  4. import threading
  5. import time
  6. import traceback
  7. from copy import deepcopy
  8. from dataclasses import dataclass
  9. from pathlib import Path
  10. from typing import Callable, Literal, Optional, Tuple, Union
  11. import click
  12. import numpy as np
  13. import torch
  14. import torch._inductor.config
  15. from loguru import logger
  16. from tqdm import tqdm
  17. from fish_speech.content_sequence import (
  18. TextPart,
  19. VQPart,
  20. )
  21. from fish_speech.conversation import Conversation, Message
  22. from fish_speech.tokenizer import IM_END_TOKEN
# Disable HF tokenizer fork-parallelism (avoids deadlock warnings when this
# process spawns worker threads).
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Inductor tuning knobs for the compiled decode path.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
# fx_graph_cache only exists on newer torch versions; guard for compatibility.
if hasattr(torch._inductor.config, "fx_graph_cache"):
    torch._inductor.config.fx_graph_cache = True
  28. from torch.nn.attention import SDPBackend, sdpa_kernel
  29. from fish_speech.models.text2semantic.llama import (
  30. DualARTransformer,
  31. )
  32. def multinomial_sample_one_no_sync(probs_sort):
  33. q = torch.rand_like(probs_sort)
  34. q = -torch.log(q)
  35. return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
RAS_WIN_SIZE = 10  # window for Repetition Aware Sampling (last N main tokens)
RAS_HIGH_TEMP = 1.0  # fallback temperature when a repeated semantic token is detected
RAS_HIGH_TOP_P = 0.9  # fallback nucleus threshold for the high-temperature resample
  39. def logits_to_probs(
  40. logits,
  41. temperature: torch.Tensor,
  42. top_p: torch.Tensor,
  43. top_k: int, # 注意: 我看到你传进来的是 int,这很关键
  44. ) -> torch.Tensor:
  45. sorted_logits, sorted_indices = torch.sort(logits, descending=True)
  46. cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
  47. indices = torch.arange(sorted_logits.shape[-1], device=sorted_logits.device)
  48. top_k_mask = indices >= top_k
  49. sorted_indices_to_remove = (cum_probs > top_p) | top_k_mask
  50. sorted_indices_to_remove[0] = False # 单元素修改问题不大,或者写成 | (indices != 0)
  51. indices_to_remove = sorted_indices_to_remove.scatter(
  52. dim=-1, index=sorted_indices, src=sorted_indices_to_remove
  53. )
  54. logits = torch.where(
  55. indices_to_remove, float("-Inf"), logits
  56. ) # 同样替换 masked_fill_ 为 torch.where
  57. logits = logits / torch.clip(temperature, min=1e-5)
  58. probs = torch.nn.functional.softmax(logits, dim=-1)
  59. return probs
  60. def sample(
  61. logits,
  62. temperature: torch.Tensor,
  63. top_p: torch.Tensor,
  64. top_k: int,
  65. ) -> Tuple[torch.Tensor, torch.Tensor]:
  66. probs = logits_to_probs(
  67. logits=logits[0, -1],
  68. temperature=temperature,
  69. top_p=top_p,
  70. top_k=top_k,
  71. )
  72. idx_next = multinomial_sample_one_no_sync(probs)
  73. return idx_next, probs
def decode_one_token_ar(
    model: DualARTransformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    previous_tokens: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Decode one step of the dual-AR transformer.

    Runs the slow (main) transformer once to pick the main token, then the
    fast transformer once per remaining codebook.

    Returns:
        Tensor of shape (1, num_codebooks + 1): main token followed by the
        per-codebook tokens, transposed from the stacked column.
    """
    forward_result = model.forward_generate(
        x,
        input_pos,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
    )
    logits = forward_result.logits  # (1, 1, vocab_size)
    hidden_states = forward_result.hidden_states
    # Constrained decoding: bias is 0 for semantic tokens + im_end, -inf elsewhere
    # (built in generate()), so only those ids can be sampled.
    biased_logits = logits + semantic_logit_bias
    # Normal sample with the caller's temperature/top_p.
    main_token_normal = sample(
        biased_logits, temperature=temperature, top_p=top_p, top_k=top_k
    )[0]
    # RAS: also sample with high temperature to use as fallback if token repeats.
    high_temp = torch.tensor(
        RAS_HIGH_TEMP, device=temperature.device, dtype=temperature.dtype
    )
    high_top_p = torch.tensor(RAS_HIGH_TOP_P, device=top_p.device, dtype=top_p.dtype)
    main_token_high = sample(
        biased_logits, temperature=high_temp, top_p=high_top_p, top_k=top_k
    )[0]
    # Prefer the high-temperature sample when the normal token is semantic AND
    # already appears in the recent window (repetition-aware sampling).
    if previous_tokens is not None:
        in_window = (previous_tokens[0] == main_token_normal).any()
        # Tensor ops (&, torch.where) instead of Python `and`/`if` —
        # torch.compile requires no data-dependent branching.
        is_semantic = (main_token_normal >= model.config.semantic_begin_id) & (
            main_token_normal <= model.config.semantic_end_id
        )
        should_use_high = in_window & is_semantic
        main_token_normal = torch.where(
            should_use_high, main_token_high, main_token_normal
        )
    codebooks = [main_token_normal]
    # Prime the fast transformer at position 0 with the slow model's hidden state.
    input_pos = torch.tensor([0], device=hidden_states.device, dtype=torch.long)
    model.forward_generate_fast(hidden_states, input_pos)
    # Shift the main token into codebook space; clamp covers non-semantic ids
    # (e.g. im_end), which would otherwise go negative.
    a = codebooks[0] - model.config.semantic_begin_id
    a = torch.clamp(a, min=0, max=model.config.codebook_size - 1)
    hidden_states = model.fast_embeddings(a)
    codebooks.append(a)
    # Autoregressively decode the remaining codebooks with the fast transformer.
    for codebook_idx in range(1, model.config.num_codebooks):
        input_pos = torch.tensor(
            [codebook_idx], device=hidden_states.device, dtype=torch.long
        )
        logits = model.forward_generate_fast(hidden_states, input_pos)
        short_logits = logits  # DualAR predicts config.codebook_size number of tokens
        # No constraint applied to the fast-codebook logits.
        a = sample(
            short_logits,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )[0]
        hidden_states = model.fast_embeddings(a)
        codebooks.append(a)
    codebooks = torch.stack(codebooks, dim=1)
    # Only delete references; let Python GC handle cleanup.
    del logits, hidden_states, forward_result
    return codebooks.T
def decode_n_tokens(
    model: DualARTransformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
):
    """Autoregressively decode up to `num_new_tokens` steps.

    Stops early when the main codebook emits the im_end token.

    Returns:
        Tensor of shape (num_codebooks + 1, n_generated) — all generated
        steps concatenated along the time axis.

    NOTE(review): `num_new_tokens` must be >= 1, otherwise `new_tokens` is
    empty and torch.cat raises — confirm callers guarantee this.
    """
    # Rolling window for RAS (Repetition Aware Sampling).
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, RAS_WIN_SIZE),
        dtype=torch.int,
        device=cur_token.device,
    )
    # Accumulate all generated tokens (the actual output).
    new_tokens = []
    # Pre-fetch the stop-token id once, outside the loop.
    im_end_id = model.tokenizer.get_token_id(IM_END_TOKEN)
    for i in tqdm(range(num_new_tokens)):
        # Force the math SDP backend (deterministic / compile-safe).
        with sdpa_kernel(SDPBackend.MATH):
            next_token = decode_one_token(
                model=model,
                x=cur_token,
                input_pos=input_pos,
                previous_tokens=previous_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                semantic_logit_bias=semantic_logit_bias,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
            ).clone()
        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)
        # Roll the RAS window left and insert the new token at the end.
        previous_tokens = previous_tokens.roll(-1, dims=1)
        previous_tokens[:, -1] = next_token.view(model.config.num_codebooks + 1, -1)[
            :, 0
        ]
        new_tokens.append(next_token)
        # Stop when the main codebook produced im_end.
        if cur_token[0, 0, -1] == im_end_id:
            break
    del cur_token
    return torch.cat(new_tokens, dim=1)
@torch.no_grad()
@torch.inference_mode()
def generate(
    *,
    model: DualARTransformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
    num_samples: int = 1,
    **sampling_kwargs,
):
    """
    Takes a conditioning sequence (prompt) as input and continues to generate
    as many tokens as requested.

    Args:
        prompt: (num_codebooks + 1, T) conditioning tokens.
        max_new_tokens: cap on generated tokens; 0 means "fill to max_seq_len".
        sampling_kwargs: optional `temperature`, `top_p`, `top_k`.

    Returns:
        (num_codebooks + 1, T + n_generated) tensor: prompt plus generation.
    """
    # Create an empty tensor of the expected final shape and fill in the
    # current tokens.
    T = prompt.size(1)
    # NOTE(review): `empty[:, :T] = prompt` below broadcasts a
    # (num_samples, cb, T) tensor into (cb, T); this only works for
    # num_samples == 1 — confirm callers never pass num_samples > 1.
    prompt = prompt[None].repeat(num_samples, 1, 1)
    if T >= model.config.max_seq_len:
        raise ValueError(
            f"Input sequence length {T} exceeds max_seq_len {model.config.max_seq_len}"
        )
    if max_new_tokens:
        # Clamp so prompt + generation never exceeds the model's context.
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
        T_new = T + max_new_tokens
    else:
        # max_new_tokens == 0 means "use all remaining context".
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T
    device = prompt.device
    dtype = next(
        model.parameters()
    ).dtype  # model weight dtype (e.g. bfloat16), NOT prompt dtype (int)
    # Only set up the KV cache on first run; reuse it afterwards.
    if not hasattr(model, "_cache_setup_done") or not model._cache_setup_done:
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,  # fixed to 1, avoid dynamic changes
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        model._cache_setup_done = True
    codebook_dim = 1 + model.config.num_codebooks
    # Create new tensors each time, but try to reuse memory.
    input_pos = torch.arange(0, T, device=device, dtype=torch.long)
    empty = torch.empty(
        (codebook_dim, model.config.max_seq_len), dtype=prompt.dtype, device=device
    )
    empty[:, :T] = prompt
    seq = empty
    # Sampling hyperparameters (tensor form for compile-friendliness).
    temp_val = sampling_kwargs.get("temperature", 1.0)
    top_p_val = sampling_kwargs.get("top_p", 0.9)
    top_k_val = sampling_kwargs.get("top_k", 30)
    temperature = torch.tensor(temp_val, device=device, dtype=dtype)
    top_p = torch.tensor(top_p_val, device=device, dtype=dtype)
    # Build semantic logit bias: 0 for semantic tokens + im_end, -inf for all
    # others (constrained decoding mask used in decode_one_token_ar).
    vocab_size = model.config.vocab_size
    semantic_logit_bias = torch.full(
        (1, 1, vocab_size), float("-inf"), device=device, dtype=dtype
    )
    # Semantic id range comes from the model config.
    semantic_logit_bias[
        0, 0, model.config.semantic_begin_id : model.config.semantic_end_id + 1
    ] = 0.0
    # im_end is the only non-semantic id allowed (stop token).
    semantic_logit_bias[0, 0, model.tokenizer.get_token_id(IM_END_TOKEN)] = 0.0
    # Prefill always uses the uncompiled AR decode (full-prompt forward).
    prefill_decode = decode_one_token_ar
    first_token = prefill_decode(
        model,
        prompt.view(1, codebook_dim, -1),
        input_pos,
        temperature,
        top_p,
        top_k_val,
        semantic_logit_bias,
        audio_masks,
        audio_parts,
    )
    seq[:, T : T + 1] = first_token
    # Recreate input_pos: decoding continues one position at a time from T.
    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        first_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_val,
        semantic_logit_bias=semantic_logit_bias,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
        decode_one_token=decode_one_token,
    )
    # Trim to the actual generated length (generation may stop early at im_end).
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x
    # Clean up temporary variables.
    del first_token, x, prompt, empty, input_pos
    return seq
  294. def init_model(checkpoint_path, device, precision, compile=False):
  295. model = DualARTransformer.from_pretrained(checkpoint_path, load_weights=True)
  296. logger.info(f"precision: {precision.__class__.__name__}")
  297. model = model.to(device=device, dtype=precision)
  298. logger.info(f"Restored model from checkpoint")
  299. if isinstance(model, DualARTransformer):
  300. decode_one_token = decode_one_token_ar
  301. # prefill_n_tokens = decode_one_token_ar
  302. logger.info("Using DualARTransformer")
  303. else:
  304. raise ValueError("Unsupported model type")
  305. # Pre-create fixed parameter tensors to avoid runtime creation
  306. model.fixed_temperature = torch.tensor(0.7, device=device, dtype=torch.float)
  307. model.fixed_top_p = torch.tensor(0.7, device=device, dtype=torch.float)
  308. model.fixed_repetition_penalty = torch.tensor(1.5, device=device, dtype=torch.float)
  309. # Mark whether cache has been initialized
  310. model._cache_setup_done = False
  311. if compile:
  312. logger.info("Compiling function...")
  313. decode_one_token = torch.compile(
  314. decode_one_token,
  315. backend="inductor" if torch.cuda.is_available() else "aot_eager",
  316. mode="default" if torch.cuda.is_available() else None,
  317. fullgraph=True,
  318. )
  319. return model.eval(), decode_one_token
@torch.inference_mode()
def load_codec_model(codec_checkpoint_path, device, precision=torch.bfloat16):
    """Load the DAC codec model for audio encoding/decoding."""
    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    # Codec config lives at <repo root>/configs/modded_dac_vq.yaml relative
    # to this file.
    config_path = Path(__file__).parent.parent.parent / "configs" / "modded_dac_vq.yaml"
    cfg = OmegaConf.load(str(config_path))
    codec = instantiate(cfg)
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load trusted checkpoint files.
    state_dict = torch.load(codec_checkpoint_path, map_location="cpu")
    # Lightning-style checkpoints nest weights under "state_dict".
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    # Training checkpoints prefix generator weights with "generator."; keep
    # only those and strip the prefix.
    if any("generator" in k for k in state_dict):
        state_dict = {
            k.replace("generator.", ""): v
            for k, v in state_dict.items()
            if "generator." in k
        }
    # strict=False: discriminator/auxiliary keys may be absent at inference.
    codec.load_state_dict(state_dict, strict=False)
    codec.eval()
    codec.to(device=device, dtype=precision)
    return codec
@torch.inference_mode()
def encode_audio(audio_path, codec, device):
    """Encode an audio file to VQ codes.

    Args:
        audio_path: path to any file torchaudio can load.
        codec: codec model exposing `sample_rate` and `encode`.
        device: device the codec lives on.

    Returns:
        Integer code tensor of shape (num_codebooks, T).
    """
    import torchaudio

    wav, sr = torchaudio.load(str(audio_path))
    # Downmix multi-channel audio to mono.
    if wav.shape[0] > 1:
        wav = wav.mean(dim=0, keepdim=True)
    wav = torchaudio.functional.resample(wav.to(device), sr, codec.sample_rate)[0]
    # Match codec model dtype (e.g. bfloat16).
    model_dtype = next(codec.parameters()).dtype
    audios = wav[None, None].to(dtype=model_dtype)  # (1, 1, T)
    audio_lengths = torch.tensor([len(wav)], device=device, dtype=torch.long)
    indices, feature_lengths = codec.encode(audios, audio_lengths)
    # Trim padding frames using the reported feature length.
    return indices[0, :, : feature_lengths[0]]  # (num_codebooks, T)
@torch.inference_mode()
def decode_to_audio(codes, codec):
    """Decode VQ codes to audio waveform.

    Args:
        codes: (num_codebooks, T) integer code tensor.
        codec: codec model exposing `from_indices`.

    Returns:
        1-D mono waveform tensor.
    """
    # codes: (num_codebooks, T) -> (1, num_codebooks, T)
    audio = codec.from_indices(codes[None])
    return audio[0, 0]  # (T,) mono waveform
@dataclass
class GenerateResponse:
    """One streamed result yielded by generate_long."""

    # "sample": `codes` holds newly generated VQ codes for `text`;
    # "next": the current sample is finished — consumer should flush and reset.
    action: Literal["sample", "next"]
    codes: Optional[torch.Tensor] = None  # (num_codebooks, T), only for "sample"
    text: Optional[str] = None  # batch text that produced the codes
  366. def split_text_by_speaker(text: str) -> list[str]:
  367. """
  368. Split text into turns based on <|speaker:X|> tags.
  369. Args:
  370. text: The full text with speaker tags
  371. Returns:
  372. List of speaker turns, each starting with <|speaker:X|>
  373. """
  374. pattern = r"(<\|speaker:\d+\|>)"
  375. parts = re.split(pattern, text)
  376. turns = []
  377. i = 0
  378. while i < len(parts):
  379. part = parts[i].strip()
  380. if re.match(pattern, part):
  381. if i + 1 < len(parts):
  382. turn = part + parts[i + 1]
  383. turns.append(turn.strip())
  384. i += 2
  385. else:
  386. turns.append(part)
  387. i += 1
  388. else:
  389. i += 1
  390. return turns
  391. def group_turns_into_batches(
  392. turns: list[str], max_speakers: int = 3, max_bytes: int = 300
  393. ) -> list[str]:
  394. """
  395. Group turns into batches based on speaker count or byte limit.
  396. Args:
  397. turns: List of speaker turns
  398. max_speakers: Maximum number of speakers per batch (default 3)
  399. max_bytes: Maximum UTF-8 bytes per batch (default 300)
  400. Returns:
  401. List of batched text strings
  402. """
  403. batches = []
  404. current_batch = []
  405. current_bytes = 0
  406. for turn in turns:
  407. turn_bytes = len(turn.encode("utf-8"))
  408. would_exceed_speakers = len(current_batch) >= max_speakers
  409. would_exceed_bytes = current_bytes + turn_bytes > max_bytes and current_batch
  410. if would_exceed_speakers or would_exceed_bytes:
  411. batches.append("\n".join(current_batch))
  412. current_batch = [turn]
  413. current_bytes = turn_bytes
  414. else:
  415. current_batch.append(turn)
  416. current_bytes += turn_bytes
  417. if current_batch:
  418. batches.append("\n".join(current_batch))
  419. return batches
def generate_long(
    *,
    model,
    device: Union[str, torch.device],
    decode_one_token: Callable,
    text: str,
    num_samples: int = 1,
    max_new_tokens: int = 0,
    top_p: float = 0.9,
    top_k: int = 30,
    repetition_penalty: float = 1.1,
    temperature: float = 1.0,
    compile: bool = False,
    iterative_prompt: bool = True,
    chunk_length: int = 512,
    prompt_text: Optional[Union[str, list[str]]] = None,
    prompt_tokens: Optional[Union[torch.Tensor, list[torch.Tensor]]] = None,
):
    """Generate speech codes for long text, yielding GenerateResponse chunks.

    The text is split by <|speaker:X|> tags, grouped into byte-limited
    batches, and each batch is generated in turn with a rolling conversation
    history. Yields one "sample" response per batch and one "next" response
    at the end of each sample.

    NOTE(review): validation here uses `assert`, which is stripped under -O.
    """
    assert 0 < top_p <= 1, "top_p must be in (0, 1]"
    assert 0 < temperature < 2, "temperature must be in (0, 2)"
    logger.info(f"generate_long.param.device: {device}")
    logger.info(f"generate_long.param.text: {text}")
    logger.info(f"generate_long.param.max_new_tokens: {max_new_tokens}")
    logger.info(f"generate_long.param.top_p: {top_p}")
    logger.info(f"generate_long.param.top_k: {top_k}")
    logger.info(f"generate_long.param.temperature: {temperature}")
    logger.info(f"generate_long.param.compile: {compile}")
    logger.info(f"generate_long.param.chunk_length: {chunk_length}")
    logger.info(f"generate_long.param.prompt_text: {prompt_text}")
    logger.info(f"generate_long.param.prompt_tokens: {prompt_tokens}")
    # Reference prompting requires BOTH text and tokens.
    use_prompt = bool(prompt_text) and bool(prompt_tokens)
    if use_prompt and isinstance(prompt_text, str):
        # Normalize single prompt to list form.
        # NOTE(review): this wraps prompt_tokens unconditionally — if a list
        # of tensors is passed with a single prompt string, it becomes a
        # nested list; confirm callers pass matching shapes.
        prompt_text = [prompt_text]
        prompt_tokens = [prompt_tokens]
    if use_prompt:
        assert len(prompt_text) == len(
            prompt_tokens
        ), "Prompt text and tokens must have the same length"
    if prompt_tokens:
        # Keep reference codes on CPU; they are embedded into the prompt later.
        prompt_tokens = [i.cpu() for i in prompt_tokens]
    # Parameter count, used below for the bandwidth log line.
    model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    tokenizer = model.tokenizer
    max_length = model.config.max_seq_len
    # Build base conversation with system message.
    base_conversation = Conversation()
    if use_prompt:
        # Auto-add speaker tags to prompt texts that don't have them.
        tagged_prompt_text = []
        for i, t in enumerate(prompt_text):
            if not re.search(r"<\|speaker:\d+\|>", t):
                tagged_prompt_text.append(f"<|speaker:{i}|>{t}")
            else:
                tagged_prompt_text.append(t)
        system_parts = [
            TextPart(
                text="convert the provided text to speech reference to the following:\n\nText:\n",
                cal_loss=False,
            ),
        ]
        reference_text = "\n".join(tagged_prompt_text)
        system_parts.append(TextPart(text=reference_text, cal_loss=False))
        system_parts.append(TextPart(text="\n\nSpeech:\n", cal_loss=False))
        # Concatenate all reference codes into one VQ part.
        all_codes = torch.cat([c for c in prompt_tokens], dim=1)
        system_parts.append(VQPart(codes=all_codes, cal_loss=False))
    else:
        system_parts = [
            TextPart(text="convert the provided text to speech", cal_loss=False)
        ]
    base_conversation.append(
        Message(
            role="system",
            parts=system_parts,
            cal_loss=False,
            add_im_start=True,
            add_im_end=True,
        )
    )
    # Split text by speaker and group into batches.
    turns = split_text_by_speaker(text)
    if turns:
        batches = group_turns_into_batches(
            turns, max_speakers=5, max_bytes=chunk_length
        )
    else:
        # No speaker tags at all: treat the whole text as one batch.
        batches = [text]
    logger.info(f"Split into {len(turns)} turns, grouped into {len(batches)} batches")
    for sample_idx in range(num_samples):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t0 = time.perf_counter()
        # Deep copy base conversation for this sample.
        conversation = deepcopy(base_conversation)
        for batch_idx, batch_text in enumerate(batches):
            logger.info(
                f"--- Sample {sample_idx}, Batch {batch_idx} "
                f"({len(batch_text.encode('utf-8'))} bytes) ---"
            )
            logger.info(f"Batch text: {batch_text}")
            # Add user message.
            conversation.append(
                Message(
                    role="user",
                    parts=[TextPart(text=batch_text, cal_loss=False)],
                    cal_loss=False,
                    add_im_start=True,
                    add_im_end=True,
                )
            )
            # Deep copy for generation (don't pollute original conversation).
            conversation_gen = deepcopy(conversation)
            conversation_gen.append(
                Message(
                    role="assistant",
                    parts=[],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=False,  # left open — model continues from here
                )
            )
            logger.info("Visualizing prompt structure:")
            conversation_gen.visualize(
                tokenizer,
                merge_audio_tokens=True,
                merge_semantic_tokens=True,
            )
            encoded, audio_masks, audio_parts = conversation_gen.encode_for_inference(
                tokenizer, num_codebooks=model.config.num_codebooks
            )
            logger.info(f"Encoded prompt shape: {encoded.shape}")
            if audio_parts is not None:
                logger.info(f"Audio parts shape: {audio_parts.shape}")
            if audio_masks is not None:
                logger.info(
                    f"Audio masks non-zero count: {torch.count_nonzero(audio_masks)}"
                )
            # Reserve 2048 tokens of headroom for generation.
            if encoded.size(1) > max_length - 2048:
                raise ValueError(
                    f"Prompt is too long: {encoded.size(1)} > {max_length - 2048}"
                )
            encoded = encoded.to(device=device)
            prompt_length = encoded.size(1)
            y = generate(
                model=model,
                prompt=encoded,
                max_new_tokens=max_new_tokens,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
                decode_one_token=decode_one_token,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
            )
            if sample_idx == 0 and batch_idx == 0 and compile:
                logger.info(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            # NOTE(review): t0 is set once per sample, so for batch_idx > 0
            # this measures cumulative time, not per-batch time — confirm
            # whether the tokens/sec figure is intended to be cumulative.
            t_batch = time.perf_counter() - t0
            tokens_generated = y.size(1) - prompt_length
            tokens_sec = tokens_generated / t_batch if t_batch > 0 else 0
            logger.info(
                f"Batch {batch_idx}: Generated {tokens_generated} tokens in "
                f"{t_batch:.02f} seconds, {tokens_sec:.02f} tokens/sec"
            )
            logger.info(
                f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
            )
            # Extract generated codes: drop the text row (row 0) and the
            # trailing im_end column.
            codes = y[1:, prompt_length:-1].clone()
            assert (codes >= 0).all(), f"Negative code found: {codes}"
            # Add assistant message with generated codes back to conversation.
            conversation.append(
                Message(
                    role="assistant",
                    parts=[VQPart(codes=codes.cpu(), cal_loss=False)],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=True,
                )
            )
            yield GenerateResponse(action="sample", codes=codes, text=batch_text)
            MAX_HISTORY_TURNS = 2  # keep only the most recent 2 user/assistant rounds
            assistant_indices = [i for i, m in enumerate(conversation.messages) if m.role == "assistant"]
            if len(assistant_indices) > MAX_HISTORY_TURNS:
                drop = assistant_indices[0]
                # Drop the earliest user+assistant pair; keep the system message.
                conversation = Conversation([m for i, m in enumerate(conversation.messages)
                                             if i not in (drop - 1, drop)])
            # Cleanup
            del y, encoded
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            import gc
            gc.collect()
        if torch.cuda.is_available():
            logger.info(
                f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
            )
        yield GenerateResponse(action="next")
@dataclass
class WrappedGenerateResponse:
    """Queue envelope for worker results: a chunk on success, the exception on error."""

    status: Literal["success", "error"]
    # GenerateResponse when status == "success", Exception when "error".
    response: Optional[Union[GenerateResponse, Exception]] = None
@dataclass
class GenerateRequest:
    """A unit of work for the inference workers."""

    # Keyword arguments forwarded to generate_long (text, sampling params, ...).
    request: dict
    # Per-request queue the worker streams WrappedGenerateResponse items into.
    response_queue: queue.Queue
def launch_thread_safe_queue(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
    num_workers: int = 1,
):
    """Spawn daemon worker threads that serve GenerateRequest items from a queue.

    Each worker loads its own model copy, signals readiness via an Event, then
    loops pulling GenerateRequest items. Putting None on the returned queue
    stops a worker.

    Returns:
        The input queue.Queue to submit GenerateRequest items to.
    """
    input_queue = queue.Queue()
    init_events = [threading.Event() for _ in range(num_workers)]

    def worker(worker_id, init_event):
        logger.info(f"Worker {worker_id} starting, loading model...")
        model, decode_one_token = init_model(
            checkpoint_path, device, precision, compile=compile
        )
        # Pre-build the KV cache once so the first request pays no setup cost.
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        logger.info(f"Worker {worker_id} initialized")
        init_event.set()
        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:  # sentinel: shut this worker down
                break
            kwargs = item.request
            response_queue = item.response_queue
            try:
                # Stream each generated chunk back to the requester.
                for chunk in generate_long(
                    model=model, decode_one_token=decode_one_token, **kwargs
                ):
                    response_queue.put(
                        WrappedGenerateResponse(status="success", response=chunk)
                    )
                # Only clear cache after a complete request batch.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
            except Exception as e:
                logger.error(traceback.format_exc())
                response_queue.put(WrappedGenerateResponse(status="error", response=e))
                # Clear cache on error too.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    for i in range(num_workers):
        threading.Thread(target=worker, args=(i, init_events[i]), daemon=True).start()
    # Block until every worker has finished loading its model.
    for event in init_events:
        event.wait()
    logger.info(f"All {num_workers} workers initialized successfully")
    return input_queue
@click.command()
@click.option(
    "--text",
    type=str,
    default="<|speaker:0|>你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.",
)
@click.option("--prompt-text", type=str, default=None, multiple=True)
@click.option(
    "--prompt-tokens",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--prompt-audio",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option("--output", type=click.Path(path_type=Path), default=None)
@click.option("--num-samples", type=int, default=1)
@click.option("--max-new-tokens", type=int, default=0)
@click.option("--top-p", type=float, default=0.9)
@click.option("--top-k", type=int, default=30)
@click.option("--temperature", type=float, default=1.0)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="checkpoints/s2-pro",
)
@click.option("--device", type=str, default="cuda")
@click.option("--compile/--no-compile", default=False)
@click.option("--seed", type=int, default=42)
@click.option("--half/--no-half", default=False)
@click.option("--iterative-prompt/--no-iterative-prompt", default=True)
@click.option("--chunk-length", type=int, default=300)
@click.option("--output-dir", type=Path, default="output")
def main(
    text: str,
    prompt_text: Optional[tuple[str, ...]],
    prompt_tokens: Optional[tuple[Path, ...]],
    prompt_audio: Optional[tuple[Path, ...]],
    output: Optional[Path],
    num_samples: int,
    max_new_tokens: int,
    top_p: float,
    top_k: int,
    temperature: float,
    checkpoint_path: Path,
    device: str,
    compile: bool,
    seed: int,
    half: bool,
    iterative_prompt: bool,
    chunk_length: int,
    output_dir: Path,
) -> None:
    """CLI entry point: generate speech codes (and optionally audio) for text."""
    os.makedirs(output_dir, exist_ok=True)
    precision = torch.half if half else torch.bfloat16
    # Validate prompt option combinations before doing any heavy loading.
    if prompt_text and not prompt_audio and not prompt_tokens:
        raise ValueError(
            "--prompt-text requires either --prompt-audio or --prompt-tokens"
        )
    if prompt_text and prompt_tokens and len(prompt_text) != len(prompt_tokens):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt tokens ({len(prompt_tokens)}) should be the same"
        )
    if prompt_text and prompt_audio and len(prompt_text) != len(prompt_audio):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt audio ({len(prompt_audio)}) should be the same"
        )
    logger.info("Loading model ...")
    t0 = time.time()
    model, decode_one_token = init_model(
        checkpoint_path, device, precision, compile=compile
    )
    with torch.device(device):
        model.setup_caches(
            max_batch_size=1,
            max_seq_len=model.config.max_seq_len,
            dtype=next(model.parameters()).dtype,
        )
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")
    codec = None
    codec_checkpoint = checkpoint_path / "codec.pth"
    # Handle prompt: --prompt-audio takes priority over --prompt-tokens.
    prompt_tokens_list = None
    if prompt_audio:
        logger.info("Loading codec model for audio encoding...")
        codec = load_codec_model(codec_checkpoint, device, precision)
        prompt_tokens_list = [
            encode_audio(p, codec, device).cpu() for p in prompt_audio
        ]
        logger.info(f"Encoded {len(prompt_audio)} audio file(s) to VQ codes")
    elif prompt_tokens is not None:
        # NOTE(review): click's multiple=True yields a tuple (never None), so
        # this branch also runs for an empty tuple and produces [] — harmless
        # downstream (falsy), but `elif prompt_tokens:` would be clearer.
        prompt_tokens_list = [torch.from_numpy(np.load(p)) for p in prompt_tokens]
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    generator = generate_long(
        model=model,
        device=device,
        decode_one_token=decode_one_token,
        text=text,
        num_samples=num_samples,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        compile=compile,
        iterative_prompt=iterative_prompt,
        chunk_length=chunk_length,
        prompt_text=list(prompt_text) if prompt_text else None,
        prompt_tokens=prompt_tokens_list,
    )
    idx = 0
    codes = []
    # Consume the stream: accumulate codes per sample, flush on "next".
    for response in generator:
        if response.action == "sample":
            codes.append(response.codes)
            logger.info(f"Sampled text: {response.text}")
        elif response.action == "next":
            if codes:
                merged_codes = torch.cat(codes, dim=1)
                codes_npy_path = os.path.join(output_dir, f"codes_{idx}.npy")
                np.save(codes_npy_path, merged_codes.cpu().numpy())
                logger.info(f"Saved codes to {codes_npy_path}")
                # Decode to wav if --output is specified.
                if output:
                    if codec is None:
                        logger.info("Loading codec model for audio decoding...")
                        codec = load_codec_model(codec_checkpoint, device, precision)
                    audio = decode_to_audio(merged_codes.to(device), codec)
                    import soundfile as sf
                    # Suffix the stem with the sample index when multiple samples.
                    out_path = (
                        str(output)
                        if num_samples == 1
                        else str(output.with_stem(f"{output.stem}_{idx}"))
                    )
                    sf.write(out_path, audio.cpu().float().numpy(), codec.sample_rate)
                    logger.info(f"Saved audio to {out_path}")
            logger.info(f"Next sample")
            codes = []
            idx += 1
        else:
            logger.error(f"Error: {response}")
# Script entry point (click handles argument parsing).
if __name__ == "__main__":
    main()