# inference.py
  1. import os
  2. import queue
  3. import re
  4. import threading
  5. import time
  6. import traceback
  7. from copy import deepcopy
  8. from dataclasses import dataclass
  9. from pathlib import Path
  10. from typing import Callable, Literal, Optional, Tuple, Union
  11. import click
  12. import numpy as np
  13. import torch
  14. import torch._inductor.config
  15. from loguru import logger
  16. from tqdm import tqdm
  17. from fish_speech.content_sequence import (
  18. TextPart,
  19. VQPart,
  20. )
  21. from fish_speech.conversation import Conversation, Message
  22. from fish_speech.tokenizer import IM_END_TOKEN
# Avoid tokenizer fork warnings / deadlocks when tokenizers are used from worker threads.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Inductor tuning knobs used by the torch.compile'd decode path.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
if hasattr(torch._inductor.config, "fx_graph_cache"):
    # Cache compiled FX graphs across runs (guarded: not present in older torch versions).
    torch._inductor.config.fx_graph_cache = True
  28. from torch.nn.attention import SDPBackend, sdpa_kernel
  29. from fish_speech.models.text2semantic.llama import (
  30. BaseTransformer,
  31. DualARTransformer,
  32. NaiveTransformer,
  33. )
  34. def multinomial_sample_one_no_sync(
  35. probs_sort,
  36. ): # Does multinomial sampling without a cuda synchronization
  37. q = torch.empty_like(probs_sort).exponential_(1)
  38. return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
# Repetition Aware Sampling (RAS) hyper-parameters: when the normally-sampled
# semantic token already appears in the last RAS_WIN_SIZE generated tokens, a
# sample drawn at the higher temperature / top-p below is used instead, to
# break repetition loops (see decode_one_token_ar).
RAS_WIN_SIZE = 10  # window for Repetition Aware Sampling
RAS_HIGH_TEMP = 1.0
RAS_HIGH_TOP_P = 0.9
  42. def logits_to_probs(
  43. logits,
  44. temperature: torch.Tensor,
  45. top_p: torch.Tensor,
  46. top_k: torch.Tensor,
  47. ) -> torch.Tensor:
  48. # Sort and compute top-p mask
  49. sorted_logits, sorted_indices = torch.sort(logits, descending=True)
  50. cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
  51. sorted_indices_to_remove = cum_probs > top_p
  52. # top-k mask
  53. sorted_indices_to_remove[top_k:] = True
  54. sorted_indices_to_remove[0] = False # keep at least one option
  55. indices_to_remove = sorted_indices_to_remove.scatter(
  56. dim=-1, index=sorted_indices, src=sorted_indices_to_remove
  57. )
  58. logits = logits.masked_fill(indices_to_remove, -float("Inf"))
  59. logits = logits / torch.clip(temperature, min=1e-5)
  60. probs = torch.nn.functional.softmax(logits, dim=-1)
  61. return probs
  62. def sample(
  63. logits,
  64. temperature: torch.Tensor,
  65. top_p: torch.Tensor,
  66. top_k: int,
  67. ) -> Tuple[torch.Tensor, torch.Tensor]:
  68. probs = logits_to_probs(
  69. logits=logits[0, -1],
  70. temperature=temperature,
  71. top_p=top_p,
  72. top_k=top_k,
  73. )
  74. idx_next = multinomial_sample_one_no_sync(probs)
  75. return idx_next, probs
def decode_one_token_ar(
    model: DualARTransformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    previous_tokens: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Decode one full token column (semantic token + all codebooks) with the dual-AR model.

    The slow transformer produces the semantic token under a logit bias that
    restricts output to semantic ids + im_end; the fast transformer then
    autoregressively produces the remaining codebooks.

    Args:
        model: the dual-AR text2semantic model.
        x: current input token column, shape (1, num_codebooks + 1, T).
        input_pos: position indices for the slow transformer's KV cache.
        temperature / top_p / top_k: sampling parameters (tensors so the
            function stays torch.compile-friendly; top_k is a plain int).
        semantic_logit_bias: (1, 1, vocab) additive mask, -inf everywhere
            except semantic tokens and im_end (built in `generate`).
        audio_masks / audio_parts: conversation audio conditioning passed
            through to `forward_generate`.
        previous_tokens: optional (num_codebooks + 1, RAS_WIN_SIZE) rolling
            window of recent tokens for Repetition Aware Sampling.

    Returns:
        Tensor of shape (num_codebooks + 1, 1): sampled semantic token id
        followed by one id per codebook.
    """
    forward_result = model.forward_generate(
        x,
        input_pos,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
    )
    logits = forward_result.logits  # (1, 1, vocab_size)
    hidden_states = forward_result.hidden_states

    # Apply constrained decoding: only allow semantic tokens + im_end
    biased_logits = logits + semantic_logit_bias

    # Normal sample
    main_token_normal = sample(biased_logits, temperature=temperature, top_p=top_p, top_k=top_k)[0]

    # RAS: also sample with high temp to use as fallback if token repeats
    high_temp = torch.tensor(RAS_HIGH_TEMP, device=temperature.device, dtype=temperature.dtype)
    high_top_p = torch.tensor(RAS_HIGH_TOP_P, device=top_p.device, dtype=top_p.dtype)
    main_token_high = sample(biased_logits, temperature=high_temp, top_p=high_top_p, top_k=top_k)[0]

    # Use high-temp sample if: token is semantic AND token is in previous window
    if previous_tokens is not None:
        in_window = (previous_tokens[0] == main_token_normal).any()
        # Use tensor ops (&, torch.where) instead of Python (and, if) — torch.compile requires no data-dependent branching
        is_semantic = (
            (main_token_normal >= model.config.semantic_begin_id)
            & (main_token_normal <= model.config.semantic_end_id)
        )
        should_use_high = in_window & is_semantic
        main_token_normal = torch.where(should_use_high, main_token_high, main_token_normal)

    codebooks = [main_token_normal]

    # Only clear cache for fast_layers, avoid clearing main model cache
    for layer in model.fast_layers:
        if hasattr(layer, "attention") and hasattr(layer.attention, "kv_cache"):
            layer.attention.kv_cache.k_cache.fill_(0)
            layer.attention.kv_cache.v_cache.fill_(0)

    # Prime the fast transformer at position 0 with the slow model's hidden
    # state; the returned logits are discarded — codebook 0 is derived
    # deterministically from the semantic token below.
    input_pos = torch.tensor([0], device=hidden_states.device, dtype=torch.long)
    model.forward_generate_fast(hidden_states, input_pos)

    # [MODIFIED] Access config instead of tokenizer
    # Convert the global semantic token id to a codebook-local id; ids that
    # fall outside [0, codebook_size) (e.g. im_end) are clamped to 0.
    a = codebooks[0] - model.config.semantic_begin_id
    a[a < 0] = 0
    a[a >= model.config.codebook_size] = 0
    hidden_states = model.fast_embeddings(a)
    codebooks.append(a)

    # Autoregressively sample the remaining codebooks with the fast model.
    for codebook_idx in range(1, model.config.num_codebooks):
        input_pos = torch.tensor(
            [codebook_idx], device=hidden_states.device, dtype=torch.long
        )
        logits = model.forward_generate_fast(hidden_states, input_pos)
        short_logits = logits  # DualAR predicts config.codebook_size number of tokens
        # Convert logits to probs (no constrain for fast codebooks)
        a = sample(
            short_logits,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )[0]
        hidden_states = model.fast_embeddings(a)
        codebooks.append(a)

    # (1, num_codebooks + 1) after stacking the per-codebook (1,) samples.
    codebooks = torch.stack(codebooks, dim=1)
    # Only delete references, let Python GC handle cleanup
    del logits, hidden_states, forward_result
    return codebooks.T
def decode_n_tokens(
    model: DualARTransformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
):
    """Autoregressively decode up to ``num_new_tokens`` token columns.

    Stops early when the sampled semantic token equals im_end. Returns a
    tensor of shape (num_codebooks + 1, steps) with all generated columns
    (the im_end column, if produced, is included).
    """
    # Rolling window for RAS (Repetition Aware Sampling)
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, RAS_WIN_SIZE),
        dtype=torch.int,
        device=cur_token.device,
    )

    # Accumulate all generated tokens (the actual output)
    new_tokens = []

    # [MODIFIED] Pre-fetch ID for efficiency loop
    im_end_id = model.tokenizer.get_token_id(IM_END_TOKEN)

    for i in tqdm(range(num_new_tokens)):
        # Force the math SDPA backend — required for a stable compiled graph.
        with sdpa_kernel(SDPBackend.MATH):
            next_token = decode_one_token(
                model=model,
                x=cur_token,
                input_pos=input_pos,
                previous_tokens=previous_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                semantic_logit_bias=semantic_logit_bias,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
            ).clone()
        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)

        # Roll RAS window left and insert new token at end
        previous_tokens = previous_tokens.roll(-1, dims=1)
        previous_tokens[:, -1] = next_token.view(model.config.num_codebooks + 1, -1)[:, 0]

        new_tokens.append(next_token)
        # Stop when the semantic row emits the end-of-message token.
        if cur_token[0, 0, -1] == im_end_id:
            break

    del cur_token
    return torch.cat(new_tokens, dim=1)
@torch.no_grad()
@torch.inference_mode()
def generate(
    *,
    model: DualARTransformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
    num_samples: int = 1,
    **sampling_kwargs,
):
    """
    Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

    ``prompt`` is a (num_codebooks + 1, T) token tensor. ``max_new_tokens``
    of 0 means "fill up to model.config.max_seq_len". Recognized
    ``sampling_kwargs``: temperature (1.0), top_p (0.9), top_k (30).
    Returns the full sequence including the prompt, truncated to the
    actually-generated length: (num_codebooks + 1, T + generated).
    """
    # create an empty tensor of the expected final shape and fill in the current tokens
    T = prompt.size(1)
    # NOTE(review): with num_samples > 1 the `empty[:, :T] = prompt`
    # assignment below would not broadcast — this path assumes num_samples == 1.
    prompt = prompt[None].repeat(num_samples, 1, 1)

    if T >= model.config.max_seq_len:
        raise ValueError(
            f"Input sequence length {T} exceeds max_seq_len {model.config.max_seq_len}"
        )

    # Clamp the generation budget to the model's maximum sequence length.
    if max_new_tokens:
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
        T_new = T + max_new_tokens
    else:
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T

    device = prompt.device
    dtype = next(model.parameters()).dtype  # model weight dtype (bfloat16), NOT prompt dtype (int32)

    # Critical fix: Only set up cache on first run or when necessary
    if not hasattr(model, "_cache_setup_done") or not model._cache_setup_done:
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,  # Fixed to 1, avoid dynamic changes
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        model._cache_setup_done = True

    codebook_dim = 1 + model.config.num_codebooks

    # Create new tensor each time, but try to reuse memory
    input_pos = torch.arange(0, T, device=device, dtype=torch.long)
    empty = torch.empty(
        (codebook_dim, model.config.max_seq_len), dtype=prompt.dtype, device=device
    )
    empty[:, :T] = prompt
    seq = empty

    temp_val = sampling_kwargs.get("temperature", 1.0)
    top_p_val = sampling_kwargs.get("top_p", 0.9)
    top_k_val = sampling_kwargs.get("top_k", 30)
    temperature = torch.tensor(temp_val, device=device, dtype=dtype)
    top_p = torch.tensor(top_p_val, device=device, dtype=dtype)

    # Build semantic logit bias: 0 for semantic tokens + im_end, -inf for all others
    vocab_size = model.config.vocab_size
    semantic_logit_bias = torch.full(
        (1, 1, vocab_size), float("-inf"), device=device, dtype=dtype
    )
    # [MODIFIED] Use config for semantic range
    semantic_logit_bias[
        0, 0, model.config.semantic_begin_id : model.config.semantic_end_id + 1
    ] = 0.0
    # [MODIFIED] Use tokenizer.get_token_id (Wrapper method)
    semantic_logit_bias[0, 0, model.tokenizer.get_token_id(IM_END_TOKEN)] = 0.0

    # Prefill: run the whole prompt through the model and sample the first token.
    prefill_decode = decode_one_token_ar
    first_token = prefill_decode(
        model,
        prompt.view(1, codebook_dim, -1),
        input_pos,
        temperature,
        top_p,
        top_k_val,
        semantic_logit_bias,
        audio_masks,
        audio_parts,
    )
    seq[:, T : T + 1] = first_token

    # Recreate input_pos
    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        first_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_val,
        semantic_logit_bias=semantic_logit_bias,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
        decode_one_token=decode_one_token,
    )
    # Truncate the preallocated buffer to what was actually generated.
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x

    # Clean up temporary variables
    del first_token, x, prompt, empty, input_pos
    return seq
  292. def init_model(checkpoint_path, device, precision, compile=False):
  293. model = DualARTransformer.from_pretrained(checkpoint_path, load_weights=True)
  294. model = model.to(device=device, dtype=precision)
  295. logger.info(f"Restored model from checkpoint")
  296. if isinstance(model, DualARTransformer):
  297. decode_one_token = decode_one_token_ar
  298. # prefill_n_tokens = decode_one_token_ar
  299. logger.info("Using DualARTransformer")
  300. else:
  301. raise ValueError("Unsupported model type")
  302. # Pre-create fixed parameter tensors to avoid runtime creation
  303. model.fixed_temperature = torch.tensor(0.7, device=device, dtype=torch.float)
  304. model.fixed_top_p = torch.tensor(0.7, device=device, dtype=torch.float)
  305. model.fixed_repetition_penalty = torch.tensor(1.5, device=device, dtype=torch.float)
  306. # Mark whether cache has been initialized
  307. model._cache_setup_done = False
  308. if compile:
  309. logger.info("Compiling function...")
  310. decode_one_token = torch.compile(
  311. decode_one_token,
  312. backend="inductor" if torch.cuda.is_available() else "aot_eager",
  313. mode="reduce-overhead" if torch.cuda.is_available() else None,
  314. fullgraph=True,
  315. )
  316. return model.eval(), decode_one_token
  317. @torch.inference_mode()
  318. def load_codec_model(codec_checkpoint_path, device, precision=torch.bfloat16):
  319. """Load the DAC codec model for audio encoding/decoding."""
  320. from hydra.utils import instantiate
  321. from omegaconf import OmegaConf
  322. config_path = Path(__file__).parent.parent.parent / "configs" / "modded_dac_vq.yaml"
  323. cfg = OmegaConf.load(str(config_path))
  324. codec = instantiate(cfg)
  325. state_dict = torch.load(codec_checkpoint_path, map_location="cpu")
  326. if "state_dict" in state_dict:
  327. state_dict = state_dict["state_dict"]
  328. if any("generator" in k for k in state_dict):
  329. state_dict = {
  330. k.replace("generator.", ""): v
  331. for k, v in state_dict.items()
  332. if "generator." in k
  333. }
  334. codec.load_state_dict(state_dict, strict=False)
  335. codec.eval()
  336. codec.to(device=device, dtype=precision)
  337. return codec
  338. @torch.inference_mode()
  339. def encode_audio(audio_path, codec, device):
  340. """Encode an audio file to VQ codes."""
  341. import torchaudio
  342. wav, sr = torchaudio.load(str(audio_path))
  343. if wav.shape[0] > 1:
  344. wav = wav.mean(dim=0, keepdim=True)
  345. wav = torchaudio.functional.resample(
  346. wav.to(device), sr, codec.sample_rate
  347. )[0]
  348. # Match codec model dtype (e.g. bfloat16)
  349. model_dtype = next(codec.parameters()).dtype
  350. audios = wav[None, None].to(dtype=model_dtype) # (1, 1, T)
  351. audio_lengths = torch.tensor([len(wav)], device=device, dtype=torch.long)
  352. indices, feature_lengths = codec.encode(audios, audio_lengths)
  353. return indices[0, :, : feature_lengths[0]] # (num_codebooks, T)
  354. @torch.inference_mode()
  355. def decode_to_audio(codes, codec):
  356. """Decode VQ codes to audio waveform."""
  357. # codes: (num_codebooks, T) -> (1, num_codebooks, T)
  358. audio = codec.from_indices(codes[None])
  359. return audio[0, 0] # (T,) mono waveform
@dataclass
class GenerateResponse:
    """One streamed result from `generate_long`.

    action "sample" carries freshly generated codes (and the batch text) for
    one text batch; action "next" marks the end of one full sample, with
    `codes` and `text` left as None.
    """

    action: Literal["sample", "next"]
    codes: Optional[torch.Tensor] = None
    text: Optional[str] = None
  365. def split_text_by_speaker(text: str) -> list[str]:
  366. """
  367. Split text into turns based on <|speaker:X|> tags.
  368. Args:
  369. text: The full text with speaker tags
  370. Returns:
  371. List of speaker turns, each starting with <|speaker:X|>
  372. """
  373. pattern = r"(<\|speaker:\d+\|>)"
  374. parts = re.split(pattern, text)
  375. turns = []
  376. i = 0
  377. while i < len(parts):
  378. part = parts[i].strip()
  379. if re.match(pattern, part):
  380. if i + 1 < len(parts):
  381. turn = part + parts[i + 1]
  382. turns.append(turn.strip())
  383. i += 2
  384. else:
  385. turns.append(part)
  386. i += 1
  387. else:
  388. i += 1
  389. return turns
  390. def group_turns_into_batches(
  391. turns: list[str], max_speakers: int = 3, max_bytes: int = 300
  392. ) -> list[str]:
  393. """
  394. Group turns into batches based on speaker count or byte limit.
  395. Args:
  396. turns: List of speaker turns
  397. max_speakers: Maximum number of speakers per batch (default 3)
  398. max_bytes: Maximum UTF-8 bytes per batch (default 300)
  399. Returns:
  400. List of batched text strings
  401. """
  402. batches = []
  403. current_batch = []
  404. current_bytes = 0
  405. for turn in turns:
  406. turn_bytes = len(turn.encode("utf-8"))
  407. would_exceed_speakers = len(current_batch) >= max_speakers
  408. would_exceed_bytes = current_bytes + turn_bytes > max_bytes and current_batch
  409. if would_exceed_speakers or would_exceed_bytes:
  410. batches.append("\n".join(current_batch))
  411. current_batch = [turn]
  412. current_bytes = turn_bytes
  413. else:
  414. current_batch.append(turn)
  415. current_bytes += turn_bytes
  416. if current_batch:
  417. batches.append("\n".join(current_batch))
  418. return batches
def generate_long(
    *,
    model,
    device: Union[str, torch.device],
    decode_one_token: Callable,
    text: str,
    num_samples: int = 1,
    max_new_tokens: int = 0,
    top_p: float = 0.9,
    top_k: int = 30,
    repetition_penalty: float = 1.1,
    temperature: float = 1.0,
    compile: bool = False,
    iterative_prompt: bool = True,
    chunk_length: int = 512,
    prompt_text: Optional[Union[str, list[str]]] = None,
    prompt_tokens: Optional[Union[torch.Tensor, list[torch.Tensor]]] = None,
):
    """Generate speech codes for ``text``, streamed as `GenerateResponse`s.

    Splits the text into speaker turns, groups them into byte-limited batches,
    and generates each batch in turn, feeding previous results back into the
    conversation as context. Yields a GenerateResponse(action="sample") per
    batch and a GenerateResponse(action="next") after each full sample.

    NOTE(review): `repetition_penalty`, `iterative_prompt` and the function's
    own `compile` flag only gate logging here — confirm whether they are
    expected to influence sampling.
    """
    assert 0 < top_p <= 1, "top_p must be in (0, 1]"
    assert 0 < temperature < 2, "temperature must be in (0, 2)"

    # Reference audio conditioning is only used when both text and tokens exist.
    use_prompt = bool(prompt_text) and bool(prompt_tokens)
    # NOTE(review): when prompt_text is a str but prompt_tokens is already a
    # list, this wraps the list one level too deep — confirm callers always
    # pass matching shapes.
    if use_prompt and isinstance(prompt_text, str):
        prompt_text = [prompt_text]
        prompt_tokens = [prompt_tokens]
    if use_prompt:
        assert len(prompt_text) == len(
            prompt_tokens
        ), "Prompt text and tokens must have the same length"
    if prompt_tokens:
        prompt_tokens = [i.cpu() for i in prompt_tokens]

    # Parameter count — used below for the bandwidth log line.
    model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    tokenizer = model.tokenizer
    max_length = model.config.max_seq_len

    # Build base conversation with system message
    base_conversation = Conversation()
    if use_prompt:
        # Auto-add speaker tags to prompt texts that don't have them
        tagged_prompt_text = []
        for i, t in enumerate(prompt_text):
            if not re.search(r"<\|speaker:\d+\|>", t):
                tagged_prompt_text.append(f"<|speaker:{i}|>{t}")
            else:
                tagged_prompt_text.append(t)
        system_parts = [
            TextPart(
                text="convert the provided text to speech reference to the following:\n\nText:\n",
                cal_loss=False,
            ),
        ]
        reference_text = "\n".join(tagged_prompt_text)
        system_parts.append(TextPart(text=reference_text, cal_loss=False))
        system_parts.append(TextPart(text="\n\nSpeech:\n", cal_loss=False))
        # Concatenate all reference VQ codes along time into one VQPart.
        all_codes = torch.cat([c for c in prompt_tokens], dim=1)
        system_parts.append(VQPart(codes=all_codes, cal_loss=False))
        # torch.save(all_codes, "debug_vq_codes.pt")
    else:
        system_parts = [
            TextPart(text="convert the provided text to speech", cal_loss=False)
        ]
    base_conversation.append(
        Message(
            role="system",
            parts=system_parts,
            cal_loss=False,
            add_im_start=True,
            add_im_end=True,
        )
    )

    # Split text by speaker and group into batches
    turns = split_text_by_speaker(text)
    if turns:
        batches = group_turns_into_batches(
            turns, max_speakers=5, max_bytes=chunk_length
        )
    else:
        # No speaker tags at all: treat the whole text as one batch.
        batches = [text]
    logger.info(
        f"Split into {len(turns)} turns, grouped into {len(batches)} batches"
    )

    for sample_idx in range(num_samples):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t0 = time.perf_counter()

        # Deep copy base conversation for this sample
        conversation = deepcopy(base_conversation)

        for batch_idx, batch_text in enumerate(batches):
            logger.info(
                f"--- Sample {sample_idx}, Batch {batch_idx} "
                f"({len(batch_text.encode('utf-8'))} bytes) ---"
            )
            logger.info(f"Batch text: {batch_text}")

            # Add user message
            conversation.append(
                Message(
                    role="user",
                    parts=[TextPart(text=batch_text, cal_loss=False)],
                    cal_loss=False,
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            # Deep copy for generation (don't pollute original conversation)
            conversation_gen = deepcopy(conversation)
            conversation_gen.append(
                Message(
                    role="assistant",
                    parts=[],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=False,
                )
            )

            logger.info("Visualizing prompt structure:")
            conversation_gen.visualize(
                tokenizer,
                merge_audio_tokens=True,
                merge_semantic_tokens=True,
            )

            encoded, audio_masks, audio_parts = (
                conversation_gen.encode_for_inference(
                    tokenizer, num_codebooks=model.config.num_codebooks
                )
            )
            logger.info(f"Encoded prompt shape: {encoded.shape}")
            if audio_parts is not None:
                logger.info(f"Audio parts shape: {audio_parts.shape}")
            if audio_masks is not None:
                logger.info(
                    f"Audio masks non-zero count: {torch.count_nonzero(audio_masks)}"
                )

            # Keep a 2048-token margin for generation within max_seq_len.
            if encoded.size(1) > max_length - 2048:
                raise ValueError(
                    f"Prompt is too long: {encoded.size(1)} > {max_length - 2048}"
                )

            encoded = encoded.to(device=device)
            prompt_length = encoded.size(1)

            y = generate(
                model=model,
                prompt=encoded,
                max_new_tokens=max_new_tokens,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
                decode_one_token=decode_one_token,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
            )

            if sample_idx == 0 and batch_idx == 0 and compile:
                logger.info(
                    f"Compilation time: {time.perf_counter() - t0:.2f} seconds"
                )

            if torch.cuda.is_available():
                torch.cuda.synchronize()

            t_batch = time.perf_counter() - t0
            tokens_generated = y.size(1) - prompt_length
            tokens_sec = tokens_generated / t_batch if t_batch > 0 else 0
            logger.info(
                f"Batch {batch_idx}: Generated {tokens_generated} tokens in "
                f"{t_batch:.02f} seconds, {tokens_sec:.02f} tokens/sec"
            )
            # NOTE(review): model_size is a parameter count, so this "GB/s"
            # figure implicitly assumes one byte per parameter — confirm units.
            logger.info(
                f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
            )

            # Extract generated codes
            # Row 0 is the semantic-token row; drop it and the final (im_end) column.
            codes = y[1:, prompt_length:-1].clone()
            assert (codes >= 0).all(), f"Negative code found: {codes}"

            # Add assistant message with generated codes back to conversation
            conversation.append(
                Message(
                    role="assistant",
                    parts=[VQPart(codes=codes.cpu(), cal_loss=False)],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            yield GenerateResponse(
                action="sample", codes=codes, text=batch_text
            )

            # Cleanup
            del y, encoded
            if torch.cuda.is_available():
                logger.info(
                    f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
                )

        yield GenerateResponse(action="next")
@dataclass
class WrappedGenerateResponse:
    """Envelope for responses crossing the worker-thread queue: either a
    successful GenerateResponse or the Exception raised during generation."""

    status: Literal["success", "error"]
    response: Optional[Union[GenerateResponse, Exception]] = None
@dataclass
class GenerateRequest:
    """A unit of work for the inference worker thread: the keyword arguments
    for `generate_long` plus the queue to stream responses back on."""

    request: dict
    response_queue: queue.Queue
def launch_thread_safe_queue(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
):
    """Start a daemon worker thread that owns the model and serves requests.

    Blocks until the worker has loaded the model and set up its caches.
    Returns the input queue: put `GenerateRequest`s on it (or None to stop
    the worker); each request's own `response_queue` receives
    `WrappedGenerateResponse`s.
    """
    input_queue = queue.Queue()
    init_event = threading.Event()

    def worker():
        # The model lives entirely inside this thread.
        model, decode_one_token = init_model(
            checkpoint_path, device, precision, compile=compile
        )
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        # Signal the caller that loading is complete.
        init_event.set()

        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:
                # None is the shutdown sentinel.
                break

            kwargs = item.request
            response_queue = item.response_queue

            try:
                for chunk in generate_long(
                    model=model, decode_one_token=decode_one_token, **kwargs
                ):
                    response_queue.put(
                        WrappedGenerateResponse(status="success", response=chunk)
                    )
                # Only clear cache after complete request batch
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
            except Exception as e:
                logger.error(traceback.format_exc())
                response_queue.put(WrappedGenerateResponse(status="error", response=e))
                # Clear cache on error
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    threading.Thread(target=worker, daemon=True).start()
    init_event.wait()

    return input_queue
@click.command()
@click.option(
    "--text",
    type=str,
    default="<|speaker:0|>你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.",
)
@click.option("--prompt-text", type=str, default=None, multiple=True)
@click.option(
    "--prompt-tokens",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--prompt-audio",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option("--output", type=click.Path(path_type=Path), default=None)
@click.option("--num-samples", type=int, default=1)
@click.option("--max-new-tokens", type=int, default=0)
@click.option("--top-p", type=float, default=0.9)
@click.option("--top-k", type=int, default=30)
@click.option("--temperature", type=float, default=1.0)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="checkpoints/s2-pro",
)
@click.option("--device", type=str, default="cuda")
@click.option("--compile/--no-compile", default=False)
@click.option("--seed", type=int, default=42)
@click.option("--half/--no-half", default=False)
@click.option("--iterative-prompt/--no-iterative-prompt", default=True)
@click.option("--chunk-length", type=int, default=300)
@click.option("--output-dir", type=Path, default="output")
def main(
    text: str,
    prompt_text: Optional[tuple[str, ...]],
    prompt_tokens: Optional[tuple[Path, ...]],
    prompt_audio: Optional[tuple[Path, ...]],
    output: Optional[Path],
    num_samples: int,
    max_new_tokens: int,
    top_p: float,
    top_k: int,
    temperature: float,
    checkpoint_path: Path,
    device: str,
    compile: bool,
    seed: int,
    half: bool,
    iterative_prompt: bool,
    chunk_length: int,
    output_dir: Path,
) -> None:
    """CLI entry point: load the model, generate codes for --text, save them
    as .npy per sample, and optionally decode to a wav via the codec."""
    os.makedirs(output_dir, exist_ok=True)
    precision = torch.half if half else torch.bfloat16

    # Validate prompt option combinations before loading anything heavy.
    if prompt_text and not prompt_audio and not prompt_tokens:
        raise ValueError(
            "--prompt-text requires either --prompt-audio or --prompt-tokens"
        )
    if (
        prompt_text
        and prompt_tokens
        and len(prompt_text) != len(prompt_tokens)
    ):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt tokens ({len(prompt_tokens)}) should be the same"
        )
    if (
        prompt_text
        and prompt_audio
        and len(prompt_text) != len(prompt_audio)
    ):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt audio ({len(prompt_audio)}) should be the same"
        )

    logger.info("Loading model ...")
    t0 = time.time()
    model, decode_one_token = init_model(
        checkpoint_path, device, precision, compile=compile
    )
    with torch.device(device):
        model.setup_caches(
            max_batch_size=1,
            max_seq_len=model.config.max_seq_len,
            dtype=next(model.parameters()).dtype,
        )
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")

    codec = None
    codec_checkpoint = checkpoint_path / "codec.pth"

    # Handle prompt: --prompt-audio takes priority over --prompt-tokens
    prompt_tokens_list = None
    if prompt_audio:
        logger.info("Loading codec model for audio encoding...")
        codec = load_codec_model(codec_checkpoint, device, precision)
        prompt_tokens_list = [
            encode_audio(p, codec, device).cpu() for p in prompt_audio
        ]
        logger.info(
            f"Encoded {len(prompt_audio)} audio file(s) to VQ codes"
        )
    elif prompt_tokens is not None:
        # NOTE(review): click `multiple=True` yields () rather than None, so
        # this branch also runs with no --prompt-tokens; the resulting empty
        # list is falsy downstream — confirm intended.
        prompt_tokens_list = [torch.from_numpy(np.load(p)) for p in prompt_tokens]

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    generator = generate_long(
        model=model,
        device=device,
        decode_one_token=decode_one_token,
        text=text,
        num_samples=num_samples,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        compile=compile,
        iterative_prompt=iterative_prompt,
        chunk_length=chunk_length,
        prompt_text=list(prompt_text) if prompt_text else None,
        prompt_tokens=prompt_tokens_list,
    )

    idx = 0
    codes = []

    # Consume the streamed responses: accumulate per-batch codes, flush on "next".
    for response in generator:
        if response.action == "sample":
            codes.append(response.codes)
            logger.info(f"Sampled text: {response.text}")
        elif response.action == "next":
            if codes:
                merged_codes = torch.cat(codes, dim=1)
                codes_npy_path = os.path.join(output_dir, f"codes_{idx}.npy")
                np.save(codes_npy_path, merged_codes.cpu().numpy())
                logger.info(f"Saved codes to {codes_npy_path}")

                # Decode to wav if --output is specified
                if output:
                    if codec is None:
                        logger.info("Loading codec model for audio decoding...")
                        codec = load_codec_model(
                            codec_checkpoint, device, precision
                        )
                    audio = decode_to_audio(merged_codes.to(device), codec)
                    import soundfile as sf

                    # Suffix the sample index when generating multiple samples.
                    out_path = (
                        str(output)
                        if num_samples == 1
                        else str(output.with_stem(f"{output.stem}_{idx}"))
                    )
                    sf.write(out_path, audio.cpu().float().numpy(), codec.sample_rate)
                    logger.info(f"Saved audio to {out_path}")

            logger.info(f"Next sample")
            codes = []
            idx += 1
        else:
            logger.error(f"Error: {response}")