inference.py 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971
import os
import queue
import re
import threading
import time
import traceback
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Literal, Optional, Tuple, Union

import click
import numpy as np
import torch
import torch._inductor.config
from loguru import logger
from tqdm import tqdm

from fish_speech.content_sequence import (
    TextPart,
    VQPart,
)
from fish_speech.conversation import Conversation, Message
from fish_speech.tokenizer import IM_END_TOKEN

# Disable HuggingFace tokenizers' internal parallelism; avoids fork-related
# warnings/deadlocks when this module spawns worker threads.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Tune torch.compile / inductor behavior for repeated decode calls.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
# fx_graph_cache only exists on newer torch versions, hence the hasattr guard.
if hasattr(torch._inductor.config, "fx_graph_cache"):
    torch._inductor.config.fx_graph_cache = True

from torch.nn.attention import SDPBackend, sdpa_kernel

from fish_speech.models.text2semantic.llama import (
    BaseTransformer,
    DualARTransformer,
    NaiveTransformer,
)
  34. def multinomial_sample_one_no_sync(
  35. probs_sort,
  36. ): # Does multinomial sampling without a cuda synchronization
  37. q = torch.empty_like(probs_sort).exponential_(1)
  38. return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
# Repetition Aware Sampling (RAS) hyper-parameters: when the normally-sampled
# semantic token already appears in the last RAS_WIN_SIZE tokens, the decoder
# falls back to a sample drawn with these higher-entropy settings.
RAS_WIN_SIZE = 10  # window for Repetition Aware Sampling
RAS_HIGH_TEMP = 1.0
RAS_HIGH_TOP_P = 0.9
  42. def logits_to_probs(
  43. logits,
  44. temperature: torch.Tensor,
  45. top_p: torch.Tensor,
  46. top_k: torch.Tensor,
  47. ) -> torch.Tensor:
  48. # Sort and compute top-p mask
  49. sorted_logits, sorted_indices = torch.sort(logits, descending=True)
  50. cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
  51. sorted_indices_to_remove = cum_probs > top_p
  52. # top-k mask
  53. sorted_indices_to_remove[top_k:] = True
  54. sorted_indices_to_remove[0] = False # keep at least one option
  55. indices_to_remove = sorted_indices_to_remove.scatter(
  56. dim=-1, index=sorted_indices, src=sorted_indices_to_remove
  57. )
  58. logits = logits.masked_fill(indices_to_remove, -float("Inf"))
  59. logits = logits / torch.clip(temperature, min=1e-5)
  60. probs = torch.nn.functional.softmax(logits, dim=-1)
  61. return probs
  62. def sample(
  63. logits,
  64. temperature: torch.Tensor,
  65. top_p: torch.Tensor,
  66. top_k: int,
  67. ) -> Tuple[torch.Tensor, torch.Tensor]:
  68. probs = logits_to_probs(
  69. logits=logits[0, -1],
  70. temperature=temperature,
  71. top_p=top_p,
  72. top_k=top_k,
  73. )
  74. idx_next = multinomial_sample_one_no_sync(probs)
  75. return idx_next, probs
def decode_one_token_ar(
    model: DualARTransformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    previous_tokens: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Decode one step of a DualAR transformer: sample the main (semantic)
    token from the slow backbone, then autoregressively sample the remaining
    codebook tokens from the fast head.

    Returns a tensor of shape (num_codebooks + 1, T) — the stacked codebooks
    transposed (T is x's token dimension; 1 for single-step decode).
    `previous_tokens` is the RAS window of recent main tokens, or None to
    disable repetition-aware resampling (e.g. during prefill).
    """
    forward_result = model.forward_generate(
        x,
        input_pos,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
    )
    logits = forward_result.logits  # (1, 1, vocab_size)
    hidden_states = forward_result.hidden_states

    # Apply constrained decoding: only allow semantic tokens + im_end
    # (the bias is 0 on allowed ids and -inf elsewhere, built in generate()).
    biased_logits = logits + semantic_logit_bias

    # Normal sample
    main_token_normal = sample(
        biased_logits, temperature=temperature, top_p=top_p, top_k=top_k
    )[0]

    # RAS: also sample with high temp to use as fallback if token repeats
    high_temp = torch.tensor(
        RAS_HIGH_TEMP, device=temperature.device, dtype=temperature.dtype
    )
    high_top_p = torch.tensor(RAS_HIGH_TOP_P, device=top_p.device, dtype=top_p.dtype)
    main_token_high = sample(
        biased_logits, temperature=high_temp, top_p=high_top_p, top_k=top_k
    )[0]

    # Use high-temp sample if: token is semantic AND token is in previous window
    if previous_tokens is not None:
        in_window = (previous_tokens[0] == main_token_normal).any()
        # Use tensor ops (&, torch.where) instead of Python (and, if) —
        # torch.compile requires no data-dependent branching
        is_semantic = (main_token_normal >= model.config.semantic_begin_id) & (
            main_token_normal <= model.config.semantic_end_id
        )
        should_use_high = in_window & is_semantic
        main_token_normal = torch.where(
            should_use_high, main_token_high, main_token_normal
        )

    codebooks = [main_token_normal]

    # Only clear cache for fast_layers, avoid clearing main model cache.
    # The fast head re-decodes its short codebook sequence from scratch each
    # step, so its KV cache must be zeroed before reuse.
    for layer in model.fast_layers:
        if hasattr(layer, "attention") and hasattr(layer.attention, "kv_cache"):
            layer.attention.kv_cache.k_cache.fill_(0)
            layer.attention.kv_cache.v_cache.fill_(0)

    # Prime the fast head at position 0 with the backbone's hidden state.
    input_pos = torch.tensor([0], device=hidden_states.device, dtype=torch.long)
    model.forward_generate_fast(hidden_states, input_pos)

    # [MODIFIED] Access config instead of tokenizer.
    # Map the global main-token id into codebook-local space; ids outside the
    # semantic range (e.g. im_end) clamp to 0 so the embedding lookup is safe.
    a = codebooks[0] - model.config.semantic_begin_id
    a[a < 0] = 0
    a[a >= model.config.codebook_size] = 0
    hidden_states = model.fast_embeddings(a)
    codebooks.append(a)

    # Autoregressively sample the remaining codebooks from the fast head.
    for codebook_idx in range(1, model.config.num_codebooks):
        input_pos = torch.tensor(
            [codebook_idx], device=hidden_states.device, dtype=torch.long
        )
        logits = model.forward_generate_fast(hidden_states, input_pos)
        short_logits = logits  # DualAR predicts config.codebook_size number of tokens

        # Convert logits to probs (no constrain for fast codebooks)
        a = sample(
            short_logits,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )[0]
        hidden_states = model.fast_embeddings(a)
        codebooks.append(a)

    codebooks = torch.stack(codebooks, dim=1)
    # Only delete references, let Python GC handle cleanup
    del logits, hidden_states, forward_result
    return codebooks.T
def decode_n_tokens(
    model: DualARTransformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
):
    """Autoregressively decode up to *num_new_tokens* steps, stopping early
    when the main codebook emits the im_end token.

    Returns the generated tokens concatenated along the time dimension,
    shape (num_codebooks + 1, n_generated). `input_pos` is advanced in place.
    """
    # Rolling window for RAS (Repetition Aware Sampling)
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, RAS_WIN_SIZE),
        dtype=torch.int,
        device=cur_token.device,
    )

    # Accumulate all generated tokens (the actual output)
    new_tokens = []

    # [MODIFIED] Pre-fetch ID for efficiency loop
    im_end_id = model.tokenizer.get_token_id(IM_END_TOKEN)

    for i in tqdm(range(num_new_tokens)):
        # Force the MATH SDPA backend — presumably for torch.compile
        # compatibility of the attention kernel; TODO confirm.
        with sdpa_kernel(SDPBackend.MATH):
            next_token = decode_one_token(
                model=model,
                x=cur_token,
                input_pos=input_pos,
                previous_tokens=previous_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                semantic_logit_bias=semantic_logit_bias,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
            ).clone()
        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)

        # Roll RAS window left and insert new token at end
        previous_tokens = previous_tokens.roll(-1, dims=1)
        previous_tokens[:, -1] = next_token.view(model.config.num_codebooks + 1, -1)[
            :, 0
        ]
        new_tokens.append(next_token)

        # Stop as soon as the main codebook produces the end-of-message token.
        if cur_token[0, 0, -1] == im_end_id:
            break

    del cur_token
    return torch.cat(new_tokens, dim=1)
@torch.no_grad()
@torch.inference_mode()
def generate(
    *,
    model: DualARTransformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
    num_samples: int = 1,
    **sampling_kwargs,
):
    """
    Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

    `prompt` is (num_codebooks + 1, T). Sampling parameters (`temperature`,
    `top_p`, `top_k`) come through **sampling_kwargs. Returns the prompt plus
    generated tokens, shape (num_codebooks + 1, T + n_generated).
    Raises ValueError if the prompt does not fit in the model context.
    """
    # create an empty tensor of the expected final shape and fill in the current tokens
    T = prompt.size(1)
    prompt = prompt[None].repeat(num_samples, 1, 1)

    if T >= model.config.max_seq_len:
        raise ValueError(
            f"Input sequence length {T} exceeds max_seq_len {model.config.max_seq_len}"
        )

    # Clamp max_new_tokens to the remaining context; 0/None means "fill the
    # rest of the context window".
    if max_new_tokens:
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
        T_new = T + max_new_tokens
    else:
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T

    device = prompt.device
    dtype = next(
        model.parameters()
    ).dtype  # model weight dtype (bfloat16), NOT prompt dtype (int32)

    # Critical fix: Only set up cache on first run or when necessary
    if not hasattr(model, "_cache_setup_done") or not model._cache_setup_done:
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,  # Fixed to 1, avoid dynamic changes
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        model._cache_setup_done = True

    codebook_dim = 1 + model.config.num_codebooks

    # Create new tensor each time, but try to reuse memory
    input_pos = torch.arange(0, T, device=device, dtype=torch.long)
    # NOTE(review): `empty` has no batch dim; the assignment below relies on
    # broadcasting and effectively assumes num_samples == 1 — verify callers.
    empty = torch.empty(
        (codebook_dim, model.config.max_seq_len), dtype=prompt.dtype, device=device
    )
    empty[:, :T] = prompt
    seq = empty

    temp_val = sampling_kwargs.get("temperature", 1.0)
    top_p_val = sampling_kwargs.get("top_p", 0.9)
    top_k_val = sampling_kwargs.get("top_k", 30)
    temperature = torch.tensor(temp_val, device=device, dtype=dtype)
    top_p = torch.tensor(top_p_val, device=device, dtype=dtype)

    # Build semantic logit bias: 0 for semantic tokens + im_end, -inf for all others
    vocab_size = model.config.vocab_size
    semantic_logit_bias = torch.full(
        (1, 1, vocab_size), float("-inf"), device=device, dtype=dtype
    )
    # [MODIFIED] Use config for semantic range
    semantic_logit_bias[
        0, 0, model.config.semantic_begin_id : model.config.semantic_end_id + 1
    ] = 0.0
    # [MODIFIED] Use tokenizer.get_token_id (Wrapper method)
    semantic_logit_bias[0, 0, model.tokenizer.get_token_id(IM_END_TOKEN)] = 0.0

    # Prefill the whole prompt in one pass and sample the first new token.
    prefill_decode = decode_one_token_ar
    first_token = prefill_decode(
        model,
        prompt.view(1, codebook_dim, -1),
        input_pos,
        temperature,
        top_p,
        top_k_val,
        semantic_logit_bias,
        audio_masks,
        audio_parts,
    )
    seq[:, T : T + 1] = first_token

    # Recreate input_pos
    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        first_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_val,
        semantic_logit_bias=semantic_logit_bias,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
        decode_one_token=decode_one_token,
    )
    # Trim seq to the actually-generated length (decode may stop early at
    # im_end) and splice in the generated tokens.
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x

    # Clean up temporary variables
    del first_token, x, prompt, empty, input_pos
    return seq
def init_model(checkpoint_path, device, precision, compile=False):
    """Load a DualARTransformer checkpoint and pick its decode function.

    Returns ``(model, decode_one_token)`` where the model is in eval mode on
    *device* with weights cast to *precision*, and decode_one_token is
    optionally wrapped with torch.compile.
    """
    model = DualARTransformer.from_pretrained(checkpoint_path, load_weights=True)
    model = model.to(device=device, dtype=precision)
    logger.info(f"Restored model from checkpoint")

    if isinstance(model, DualARTransformer):
        decode_one_token = decode_one_token_ar
        # prefill_n_tokens = decode_one_token_ar
        logger.info("Using DualARTransformer")
    else:
        raise ValueError("Unsupported model type")

    # Pre-create fixed parameter tensors to avoid runtime creation
    model.fixed_temperature = torch.tensor(0.7, device=device, dtype=torch.float)
    model.fixed_top_p = torch.tensor(0.7, device=device, dtype=torch.float)
    model.fixed_repetition_penalty = torch.tensor(1.5, device=device, dtype=torch.float)

    # Mark whether cache has been initialized (consumed by generate()).
    model._cache_setup_done = False

    if compile:
        logger.info("Compiling function...")
        decode_one_token = torch.compile(
            decode_one_token,
            # CUDA gets the full inductor path; CPU falls back to aot_eager.
            backend="inductor" if torch.cuda.is_available() else "aot_eager",
            mode="reduce-overhead" if torch.cuda.is_available() else None,
            fullgraph=True,
        )
    return model.eval(), decode_one_token
@torch.inference_mode()
def load_codec_model(codec_checkpoint_path, device, precision=torch.bfloat16):
    """Load the DAC codec model for audio encoding/decoding."""
    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    # Codec architecture config lives three levels up from this file.
    config_path = Path(__file__).parent.parent.parent / "configs" / "modded_dac_vq.yaml"
    cfg = OmegaConf.load(str(config_path))
    codec = instantiate(cfg)

    state_dict = torch.load(codec_checkpoint_path, map_location="cpu")
    # Unwrap Lightning-style checkpoints that nest weights under "state_dict".
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    # Some checkpoints prefix generator weights with "generator."; keep only
    # those entries and strip the prefix.
    if any("generator" in k for k in state_dict):
        state_dict = {
            k.replace("generator.", ""): v
            for k, v in state_dict.items()
            if "generator." in k
        }
    # strict=False: tolerate missing/unexpected keys across checkpoint variants.
    codec.load_state_dict(state_dict, strict=False)
    codec.eval()
    codec.to(device=device, dtype=precision)
    return codec
@torch.inference_mode()
def encode_audio(audio_path, codec, device):
    """Encode an audio file to VQ codes.

    Loads the file, downmixes to mono, resamples to the codec's sample rate,
    and returns the codec indices, shape (num_codebooks, T).
    """
    import torchaudio

    wav, sr = torchaudio.load(str(audio_path))
    # Downmix multi-channel audio to mono by averaging channels.
    if wav.shape[0] > 1:
        wav = wav.mean(dim=0, keepdim=True)
    wav = torchaudio.functional.resample(wav.to(device), sr, codec.sample_rate)[0]

    # Match codec model dtype (e.g. bfloat16)
    model_dtype = next(codec.parameters()).dtype
    audios = wav[None, None].to(dtype=model_dtype)  # (1, 1, T)
    audio_lengths = torch.tensor([len(wav)], device=device, dtype=torch.long)
    indices, feature_lengths = codec.encode(audios, audio_lengths)
    # Trim padding to the actual feature length of this (single) item.
    return indices[0, :, : feature_lengths[0]]  # (num_codebooks, T)
  363. @torch.inference_mode()
  364. def decode_to_audio(codes, codec):
  365. """Decode VQ codes to audio waveform."""
  366. # codes: (num_codebooks, T) -> (1, num_codebooks, T)
  367. audio = codec.from_indices(codes[None])
  368. return audio[0, 0] # (T,) mono waveform
@dataclass
class GenerateResponse:
    """One event yielded by generate_long: a generated batch ("sample") or a
    sample-finished marker ("next")."""

    # "sample": codes/text below are populated; "next": current sample is done.
    action: Literal["sample", "next"]
    # Generated VQ codes for this batch; None for "next" events.
    codes: Optional[torch.Tensor] = None
    # The batch text that produced these codes; None for "next" events.
    text: Optional[str] = None
  374. def split_text_by_speaker(text: str) -> list[str]:
  375. """
  376. Split text into turns based on <|speaker:X|> tags.
  377. Args:
  378. text: The full text with speaker tags
  379. Returns:
  380. List of speaker turns, each starting with <|speaker:X|>
  381. """
  382. pattern = r"(<\|speaker:\d+\|>)"
  383. parts = re.split(pattern, text)
  384. turns = []
  385. i = 0
  386. while i < len(parts):
  387. part = parts[i].strip()
  388. if re.match(pattern, part):
  389. if i + 1 < len(parts):
  390. turn = part + parts[i + 1]
  391. turns.append(turn.strip())
  392. i += 2
  393. else:
  394. turns.append(part)
  395. i += 1
  396. else:
  397. i += 1
  398. return turns
  399. def group_turns_into_batches(
  400. turns: list[str], max_speakers: int = 3, max_bytes: int = 300
  401. ) -> list[str]:
  402. """
  403. Group turns into batches based on speaker count or byte limit.
  404. Args:
  405. turns: List of speaker turns
  406. max_speakers: Maximum number of speakers per batch (default 3)
  407. max_bytes: Maximum UTF-8 bytes per batch (default 300)
  408. Returns:
  409. List of batched text strings
  410. """
  411. batches = []
  412. current_batch = []
  413. current_bytes = 0
  414. for turn in turns:
  415. turn_bytes = len(turn.encode("utf-8"))
  416. would_exceed_speakers = len(current_batch) >= max_speakers
  417. would_exceed_bytes = current_bytes + turn_bytes > max_bytes and current_batch
  418. if would_exceed_speakers or would_exceed_bytes:
  419. batches.append("\n".join(current_batch))
  420. current_batch = [turn]
  421. current_bytes = turn_bytes
  422. else:
  423. current_batch.append(turn)
  424. current_bytes += turn_bytes
  425. if current_batch:
  426. batches.append("\n".join(current_batch))
  427. return batches
def generate_long(
    *,
    model,
    device: Union[str, torch.device],
    decode_one_token: Callable,
    text: str,
    num_samples: int = 1,
    max_new_tokens: int = 0,
    top_p: float = 0.9,
    top_k: int = 30,
    repetition_penalty: float = 1.1,
    temperature: float = 1.0,
    compile: bool = False,
    iterative_prompt: bool = True,
    chunk_length: int = 512,
    prompt_text: Optional[Union[str, list[str]]] = None,
    prompt_tokens: Optional[Union[torch.Tensor, list[torch.Tensor]]] = None,
):
    """Generate speech codes for *text*, batch by batch, as a generator.

    Builds a system prompt (optionally with reference text + VQ codes for
    voice cloning), splits the text into speaker-turn batches, and for each
    of *num_samples* passes yields GenerateResponse(action="sample", ...)
    per batch followed by GenerateResponse(action="next") when the sample
    completes. Earlier batches' generated codes are fed back into the
    conversation as context for later batches.

    NOTE(review): `repetition_penalty`, `compile` (beyond timing logs) and
    `iterative_prompt` are accepted but not used to alter sampling here.
    """
    assert 0 < top_p <= 1, "top_p must be in (0, 1]"
    assert 0 < temperature < 2, "temperature must be in (0, 2)"

    # Reference prompting is active only when BOTH text and tokens are given.
    use_prompt = bool(prompt_text) and bool(prompt_tokens)
    if use_prompt and isinstance(prompt_text, str):
        prompt_text = [prompt_text]
        prompt_tokens = [prompt_tokens]

    if use_prompt:
        assert len(prompt_text) == len(
            prompt_tokens
        ), "Prompt text and tokens must have the same length"

    if prompt_tokens:
        prompt_tokens = [i.cpu() for i in prompt_tokens]

    # Parameter count, used below for the bandwidth log line.
    model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    tokenizer = model.tokenizer
    max_length = model.config.max_seq_len

    # Build base conversation with system message
    base_conversation = Conversation()
    if use_prompt:
        # Auto-add speaker tags to prompt texts that don't have them
        tagged_prompt_text = []
        for i, t in enumerate(prompt_text):
            if not re.search(r"<\|speaker:\d+\|>", t):
                tagged_prompt_text.append(f"<|speaker:{i}|>{t}")
            else:
                tagged_prompt_text.append(t)

        system_parts = [
            TextPart(
                text="convert the provided text to speech reference to the following:\n\nText:\n",
                cal_loss=False,
            ),
        ]
        reference_text = "\n".join(tagged_prompt_text)
        system_parts.append(TextPart(text=reference_text, cal_loss=False))
        system_parts.append(TextPart(text="\n\nSpeech:\n", cal_loss=False))
        # All reference code sequences are concatenated along time.
        all_codes = torch.cat([c for c in prompt_tokens], dim=1)
        system_parts.append(VQPart(codes=all_codes, cal_loss=False))
        # torch.save(all_codes, "debug_vq_codes.pt")
    else:
        system_parts = [
            TextPart(text="convert the provided text to speech", cal_loss=False)
        ]

    base_conversation.append(
        Message(
            role="system",
            parts=system_parts,
            cal_loss=False,
            add_im_start=True,
            add_im_end=True,
        )
    )

    # Split text by speaker and group into batches
    turns = split_text_by_speaker(text)
    if turns:
        batches = group_turns_into_batches(
            turns, max_speakers=5, max_bytes=chunk_length
        )
    else:
        # No speaker tags at all: treat the whole text as a single batch.
        batches = [text]
    logger.info(f"Split into {len(turns)} turns, grouped into {len(batches)} batches")

    for sample_idx in range(num_samples):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t0 = time.perf_counter()

        # Deep copy base conversation for this sample
        conversation = deepcopy(base_conversation)

        for batch_idx, batch_text in enumerate(batches):
            logger.info(
                f"--- Sample {sample_idx}, Batch {batch_idx} "
                f"({len(batch_text.encode('utf-8'))} bytes) ---"
            )
            logger.info(f"Batch text: {batch_text}")

            # Add user message
            conversation.append(
                Message(
                    role="user",
                    parts=[TextPart(text=batch_text, cal_loss=False)],
                    cal_loss=False,
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            # Deep copy for generation (don't pollute original conversation)
            conversation_gen = deepcopy(conversation)
            conversation_gen.append(
                Message(
                    role="assistant",
                    parts=[],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=False,
                )
            )

            logger.info("Visualizing prompt structure:")
            conversation_gen.visualize(
                tokenizer,
                merge_audio_tokens=True,
                merge_semantic_tokens=True,
            )

            encoded, audio_masks, audio_parts = conversation_gen.encode_for_inference(
                tokenizer, num_codebooks=model.config.num_codebooks
            )
            logger.info(f"Encoded prompt shape: {encoded.shape}")
            if audio_parts is not None:
                logger.info(f"Audio parts shape: {audio_parts.shape}")
            if audio_masks is not None:
                logger.info(
                    f"Audio masks non-zero count: {torch.count_nonzero(audio_masks)}"
                )

            # Leave 2048 tokens of headroom for generation.
            if encoded.size(1) > max_length - 2048:
                raise ValueError(
                    f"Prompt is too long: {encoded.size(1)} > {max_length - 2048}"
                )

            encoded = encoded.to(device=device)
            prompt_length = encoded.size(1)

            y = generate(
                model=model,
                prompt=encoded,
                max_new_tokens=max_new_tokens,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
                decode_one_token=decode_one_token,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
            )

            # First batch of the first sample includes torch.compile warm-up.
            if sample_idx == 0 and batch_idx == 0 and compile:
                logger.info(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")

            if torch.cuda.is_available():
                torch.cuda.synchronize()

            t_batch = time.perf_counter() - t0
            tokens_generated = y.size(1) - prompt_length
            tokens_sec = tokens_generated / t_batch if t_batch > 0 else 0
            logger.info(
                f"Batch {batch_idx}: Generated {tokens_generated} tokens in "
                f"{t_batch:.02f} seconds, {tokens_sec:.02f} tokens/sec"
            )
            logger.info(
                f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
            )

            # Extract generated codes: drop the main-token row (row 0) and the
            # trailing im_end step.
            codes = y[1:, prompt_length:-1].clone()
            assert (codes >= 0).all(), f"Negative code found: {codes}"

            # Add assistant message with generated codes back to conversation
            conversation.append(
                Message(
                    role="assistant",
                    parts=[VQPart(codes=codes.cpu(), cal_loss=False)],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            yield GenerateResponse(action="sample", codes=codes, text=batch_text)

            # Cleanup
            del y, encoded

        if torch.cuda.is_available():
            logger.info(
                f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
            )
        yield GenerateResponse(action="next")
@dataclass
class WrappedGenerateResponse:
    """Envelope placed on a response queue by the worker thread."""

    # "success" carries a GenerateResponse; "error" carries the Exception.
    status: Literal["success", "error"]
    response: Optional[Union[GenerateResponse, Exception]] = None


@dataclass
class GenerateRequest:
    """A unit of work for launch_thread_safe_queue's worker."""

    # Keyword arguments forwarded to generate_long (minus model/decode fn).
    request: dict
    # Queue on which WrappedGenerateResponse items are delivered.
    response_queue: queue.Queue
def launch_thread_safe_queue(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
):
    """Start a daemon worker thread that owns the model and serves
    GenerateRequest items from the returned input queue.

    Blocks until the model is loaded and caches are set up, then returns the
    queue. Pushing None onto the queue stops the worker.
    """
    input_queue = queue.Queue()
    init_event = threading.Event()

    def worker():
        # The model lives entirely on this thread; callers interact only
        # through queues.
        model, decode_one_token = init_model(
            checkpoint_path, device, precision, compile=compile
        )
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        # Signal the launcher that initialization finished.
        init_event.set()

        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:
                # Sentinel: shut the worker down.
                break

            kwargs = item.request
            response_queue = item.response_queue

            try:
                for chunk in generate_long(
                    model=model, decode_one_token=decode_one_token, **kwargs
                ):
                    response_queue.put(
                        WrappedGenerateResponse(status="success", response=chunk)
                    )
                # Only clear cache after complete request batch
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
            except Exception as e:
                # Report the failure to the caller instead of killing the worker.
                logger.error(traceback.format_exc())
                response_queue.put(WrappedGenerateResponse(status="error", response=e))
                # Clear cache on error
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    threading.Thread(target=worker, daemon=True).start()
    init_event.wait()

    return input_queue
@click.command()
@click.option(
    "--text",
    type=str,
    default="<|speaker:0|>你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.",
)
@click.option("--prompt-text", type=str, default=None, multiple=True)
@click.option(
    "--prompt-tokens",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--prompt-audio",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option("--output", type=click.Path(path_type=Path), default=None)
@click.option("--num-samples", type=int, default=1)
@click.option("--max-new-tokens", type=int, default=0)
@click.option("--top-p", type=float, default=0.9)
@click.option("--top-k", type=int, default=30)
@click.option("--temperature", type=float, default=1.0)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="checkpoints/s2-pro",
)
@click.option("--device", type=str, default="cuda")
@click.option("--compile/--no-compile", default=False)
@click.option("--seed", type=int, default=42)
@click.option("--half/--no-half", default=False)
@click.option("--iterative-prompt/--no-iterative-prompt", default=True)
@click.option("--chunk-length", type=int, default=300)
@click.option("--output-dir", type=Path, default="output")
def main(
    text: str,
    prompt_text: Optional[tuple[str, ...]],
    prompt_tokens: Optional[tuple[Path, ...]],
    prompt_audio: Optional[tuple[Path, ...]],
    output: Optional[Path],
    num_samples: int,
    max_new_tokens: int,
    top_p: float,
    top_k: int,
    temperature: float,
    checkpoint_path: Path,
    device: str,
    compile: bool,
    seed: int,
    half: bool,
    iterative_prompt: bool,
    chunk_length: int,
    output_dir: Path,
) -> None:
    """CLI entry point: text -> semantic VQ codes (.npy), optionally decoded
    to a waveform via the codec when --output is given."""
    os.makedirs(output_dir, exist_ok=True)
    precision = torch.half if half else torch.bfloat16

    # Validate prompt option combinations before loading anything heavy.
    if prompt_text and not prompt_audio and not prompt_tokens:
        raise ValueError(
            "--prompt-text requires either --prompt-audio or --prompt-tokens"
        )
    if prompt_text and prompt_tokens and len(prompt_text) != len(prompt_tokens):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt tokens ({len(prompt_tokens)}) should be the same"
        )
    if prompt_text and prompt_audio and len(prompt_text) != len(prompt_audio):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt audio ({len(prompt_audio)}) should be the same"
        )

    logger.info("Loading model ...")
    t0 = time.time()
    model, decode_one_token = init_model(
        checkpoint_path, device, precision, compile=compile
    )
    with torch.device(device):
        model.setup_caches(
            max_batch_size=1,
            max_seq_len=model.config.max_seq_len,
            dtype=next(model.parameters()).dtype,
        )
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")

    # Codec is loaded lazily — only when audio needs encoding or decoding.
    codec = None
    codec_checkpoint = checkpoint_path / "codec.pth"

    # Handle prompt: --prompt-audio takes priority over --prompt-tokens
    prompt_tokens_list = None
    if prompt_audio:
        logger.info("Loading codec model for audio encoding...")
        codec = load_codec_model(codec_checkpoint, device, precision)
        prompt_tokens_list = [
            encode_audio(p, codec, device).cpu() for p in prompt_audio
        ]
        logger.info(f"Encoded {len(prompt_audio)} audio file(s) to VQ codes")
    elif prompt_tokens is not None:
        # NOTE(review): with multiple=True click yields a tuple (possibly
        # empty), so this branch runs even with no --prompt-tokens given,
        # producing [] — generate_long treats [] as "no prompt".
        prompt_tokens_list = [torch.from_numpy(np.load(p)) for p in prompt_tokens]

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    generator = generate_long(
        model=model,
        device=device,
        decode_one_token=decode_one_token,
        text=text,
        num_samples=num_samples,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        compile=compile,
        iterative_prompt=iterative_prompt,
        chunk_length=chunk_length,
        prompt_text=list(prompt_text) if prompt_text else None,
        prompt_tokens=prompt_tokens_list,
    )

    idx = 0
    codes = []

    for response in generator:
        if response.action == "sample":
            # Accumulate per-batch codes until the sample completes.
            codes.append(response.codes)
            logger.info(f"Sampled text: {response.text}")
        elif response.action == "next":
            if codes:
                merged_codes = torch.cat(codes, dim=1)
                codes_npy_path = os.path.join(output_dir, f"codes_{idx}.npy")
                np.save(codes_npy_path, merged_codes.cpu().numpy())
                logger.info(f"Saved codes to {codes_npy_path}")

                # Decode to wav if --output is specified
                if output:
                    if codec is None:
                        logger.info("Loading codec model for audio decoding...")
                        codec = load_codec_model(codec_checkpoint, device, precision)
                    audio = decode_to_audio(merged_codes.to(device), codec)
                    import soundfile as sf

                    # Suffix the filename with the sample index when writing
                    # more than one sample.
                    out_path = (
                        str(output)
                        if num_samples == 1
                        else str(output.with_stem(f"{output.stem}_{idx}"))
                    )
                    sf.write(out_path, audio.cpu().float().numpy(), codec.sample_rate)
                    logger.info(f"Saved audio to {out_path}")
            logger.info(f"Next sample")
            codes = []
            idx += 1
        else:
            logger.error(f"Error: {response}")


if __name__ == "__main__":
    main()