inference.py 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966
  1. import os
  2. import queue
  3. import re
  4. import threading
  5. import time
  6. import traceback
  7. from copy import deepcopy
  8. from dataclasses import dataclass
  9. from pathlib import Path
  10. from typing import Callable, Literal, Optional, Tuple, Union
  11. import click
  12. import numpy as np
  13. import torch
  14. import torch._inductor.config
  15. from loguru import logger
  16. from tqdm import tqdm
  17. from fish_speech.content_sequence import (
  18. TextPart,
  19. VQPart,
  20. )
  21. from fish_speech.conversation import Conversation, Message
  22. from fish_speech.tokenizer import IM_END_TOKEN
# Turn off the HuggingFace tokenizers' internal parallelism.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Inductor tuning knobs for the torch.compile'd decode path.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
# fx_graph_cache is only present on newer torch versions, hence the hasattr guard.
if hasattr(torch._inductor.config, "fx_graph_cache"):
    torch._inductor.config.fx_graph_cache = True
  28. from torch.nn.attention import SDPBackend, sdpa_kernel
  29. from fish_speech.models.text2semantic.llama import (
  30. BaseTransformer,
  31. DualARTransformer,
  32. NaiveTransformer,
  33. )
  34. def multinomial_sample_one_no_sync(probs_sort):
  35. q = torch.rand_like(probs_sort)
  36. q = -torch.log(q)
  37. return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
# Repetition Aware Sampling (RAS) parameters: when the freshly sampled
# semantic token already appears in the last RAS_WIN_SIZE main-codebook
# tokens, the decoder falls back to a sample drawn with the higher
# temperature / top-p below to break repetition loops.
RAS_WIN_SIZE = 10  # window for Repetition Aware Sampling
RAS_HIGH_TEMP = 1.0
RAS_HIGH_TOP_P = 0.9
  41. def logits_to_probs(
  42. logits,
  43. temperature: torch.Tensor,
  44. top_p: torch.Tensor,
  45. top_k: int, # 注意: 我看到你传进来的是 int,这很关键
  46. ) -> torch.Tensor:
  47. sorted_logits, sorted_indices = torch.sort(logits, descending=True)
  48. cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
  49. indices = torch.arange(sorted_logits.shape[-1], device=sorted_logits.device)
  50. top_k_mask = indices >= top_k
  51. sorted_indices_to_remove = (cum_probs > top_p) | top_k_mask
  52. sorted_indices_to_remove[0] = False # 单元素修改问题不大,或者写成 | (indices != 0)
  53. indices_to_remove = sorted_indices_to_remove.scatter(
  54. dim=-1, index=sorted_indices, src=sorted_indices_to_remove
  55. )
  56. logits = torch.where(
  57. indices_to_remove, float("-Inf"), logits
  58. ) # 同样替换 masked_fill_ 为 torch.where
  59. logits = logits / torch.clip(temperature, min=1e-5)
  60. probs = torch.nn.functional.softmax(logits, dim=-1)
  61. return probs
  62. def sample(
  63. logits,
  64. temperature: torch.Tensor,
  65. top_p: torch.Tensor,
  66. top_k: int,
  67. ) -> Tuple[torch.Tensor, torch.Tensor]:
  68. probs = logits_to_probs(
  69. logits=logits[0, -1],
  70. temperature=temperature,
  71. top_p=top_p,
  72. top_k=top_k,
  73. )
  74. idx_next = multinomial_sample_one_no_sync(probs)
  75. return idx_next, probs
def decode_one_token_ar(
    model: DualARTransformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    previous_tokens: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Run one decode step of the dual-AR model.

    Samples the main (slow-transformer) token with constrained decoding and
    Repetition Aware Sampling, then autoregressively samples one token per
    fast codebook conditioned on the slow hidden state.

    Args:
        model: the DualAR transformer.
        x: current input token block for the slow transformer.
        input_pos: cache position(s) for `x`.
        temperature / top_p / top_k: sampling parameters (top_k stays a
            Python int so it remains static under torch.compile).
        semantic_logit_bias: additive mask (0 for allowed ids, -inf otherwise)
            restricting the main head to semantic tokens + im_end.
        audio_masks / audio_parts: audio conditioning forwarded to
            model.forward_generate.
        previous_tokens: rolling window of recent tokens for RAS, or None to
            disable repetition-aware resampling (e.g. during prefill).

    Returns:
        Tensor of shape (num_codebooks + 1, 1): the main token followed by
        one token per fast codebook.
    """
    forward_result = model.forward_generate(
        x,
        input_pos,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
    )
    logits = forward_result.logits  # (1, 1, vocab_size)
    hidden_states = forward_result.hidden_states

    # Apply constrained decoding: only allow semantic tokens + im_end
    biased_logits = logits + semantic_logit_bias

    # Normal sample
    main_token_normal = sample(
        biased_logits, temperature=temperature, top_p=top_p, top_k=top_k
    )[0]

    # RAS: also sample with high temp to use as fallback if token repeats
    high_temp = torch.tensor(
        RAS_HIGH_TEMP, device=temperature.device, dtype=temperature.dtype
    )
    high_top_p = torch.tensor(RAS_HIGH_TOP_P, device=top_p.device, dtype=top_p.dtype)
    main_token_high = sample(
        biased_logits, temperature=high_temp, top_p=high_top_p, top_k=top_k
    )[0]

    # Use high-temp sample if: token is semantic AND token is in previous window
    if previous_tokens is not None:
        # Row 0 of the window holds the main-codebook history.
        in_window = (previous_tokens[0] == main_token_normal).any()
        # Use tensor ops (&, torch.where) instead of Python (and, if) —
        # torch.compile requires no data-dependent branching
        is_semantic = (main_token_normal >= model.config.semantic_begin_id) & (
            main_token_normal <= model.config.semantic_end_id
        )
        should_use_high = in_window & is_semantic
        main_token_normal = torch.where(
            should_use_high, main_token_high, main_token_normal
        )

    codebooks = [main_token_normal]
    # The fast transformer restarts at position 0 for every slow step.
    input_pos = torch.tensor([0], device=hidden_states.device, dtype=torch.long)
    model.forward_generate_fast(hidden_states, input_pos)

    # Re-base the main token into codebook space before feeding the fast head;
    # clamp guards against im_end (which falls outside the semantic range).
    a = codebooks[0] - model.config.semantic_begin_id
    a = torch.clamp(a, min=0, max=model.config.codebook_size - 1)
    hidden_states = model.fast_embeddings(a)
    codebooks.append(a)

    for codebook_idx in range(1, model.config.num_codebooks):
        input_pos = torch.tensor(
            [codebook_idx], device=hidden_states.device, dtype=torch.long
        )
        logits = model.forward_generate_fast(hidden_states, input_pos)
        short_logits = logits  # DualAR predicts config.codebook_size number of tokens
        # Convert logits to probs (no constrain for fast codebooks)
        a = sample(
            short_logits,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )[0]
        hidden_states = model.fast_embeddings(a)
        codebooks.append(a)
    codebooks = torch.stack(codebooks, dim=1)
    # Only delete references, let Python GC handle cleanup
    del logits, hidden_states, forward_result
    return codebooks.T
def decode_n_tokens(
    model: DualARTransformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    temperature: torch.Tensor,
    top_p: torch.Tensor,
    top_k: int,
    semantic_logit_bias: torch.Tensor,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
):
    """Autoregressively decode up to `num_new_tokens` tokens.

    Stops early once the main codebook emits the im_end token.

    Returns:
        Tensor of shape (num_codebooks + 1, n_generated): all generated
        tokens concatenated along the time axis.
    """
    # Rolling window for RAS (Repetition Aware Sampling)
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, RAS_WIN_SIZE),
        dtype=torch.int,
        device=cur_token.device,
    )
    # Accumulate all generated tokens (the actual output)
    new_tokens = []
    # [MODIFIED] Pre-fetch ID for efficiency loop
    im_end_id = model.tokenizer.get_token_id(IM_END_TOKEN)

    for i in tqdm(range(num_new_tokens)):
        # NOTE(review): the MATH SDPA backend is forced here — presumably for
        # torch.compile compatibility; confirm before changing.
        with sdpa_kernel(SDPBackend.MATH):
            next_token = decode_one_token(
                model=model,
                x=cur_token,
                input_pos=input_pos,
                previous_tokens=previous_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                semantic_logit_bias=semantic_logit_bias,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
            ).clone()
        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)
        # Roll RAS window left and insert new token at end
        previous_tokens = previous_tokens.roll(-1, dims=1)
        previous_tokens[:, -1] = next_token.view(model.config.num_codebooks + 1, -1)[
            :, 0
        ]
        new_tokens.append(next_token)
        # Stop as soon as the main codebook produces im_end.
        if cur_token[0, 0, -1] == im_end_id:
            break
    del cur_token
    return torch.cat(new_tokens, dim=1)
@torch.no_grad()  # NOTE(review): redundant — inference_mode below already disables grad
@torch.inference_mode()
def generate(
    *,
    model: DualARTransformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    audio_masks: torch.Tensor,
    audio_parts: torch.Tensor,
    decode_one_token=decode_one_token_ar,
    num_samples: int = 1,
    **sampling_kwargs,
):
    """
    Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

    Recognized sampling_kwargs: temperature (default 1.0), top_p (0.9),
    top_k (30). Returns a tensor of shape (num_codebooks + 1, T_prompt + n),
    i.e. the prompt followed by the generated tokens.
    """
    # create an empty tensor of the expected final shape and fill in the current tokens
    T = prompt.size(1)
    # NOTE(review): the code below writes `prompt` into a 2-D buffer, which
    # only broadcasts when num_samples == 1 — confirm larger values are unused.
    prompt = prompt[None].repeat(num_samples, 1, 1)

    if T >= model.config.max_seq_len:
        raise ValueError(
            f"Input sequence length {T} exceeds max_seq_len {model.config.max_seq_len}"
        )

    # Clamp the generation budget so prompt + new tokens fit the cache.
    if max_new_tokens:
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
        T_new = T + max_new_tokens
    else:
        # max_new_tokens == 0 means "fill the remaining cache".
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T

    device = prompt.device
    dtype = next(
        model.parameters()
    ).dtype  # model weight dtype (bfloat16), NOT prompt dtype (int32)

    # Critical fix: Only set up cache on first run or when necessary
    if not hasattr(model, "_cache_setup_done") or not model._cache_setup_done:
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,  # Fixed to 1, avoid dynamic changes
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        model._cache_setup_done = True

    codebook_dim = 1 + model.config.num_codebooks
    # Create new tensor each time, but try to reuse memory
    input_pos = torch.arange(0, T, device=device, dtype=torch.long)
    empty = torch.empty(
        (codebook_dim, model.config.max_seq_len), dtype=prompt.dtype, device=device
    )
    empty[:, :T] = prompt
    seq = empty

    temp_val = sampling_kwargs.get("temperature", 1.0)
    top_p_val = sampling_kwargs.get("top_p", 0.9)
    top_k_val = sampling_kwargs.get("top_k", 30)
    temperature = torch.tensor(temp_val, device=device, dtype=dtype)
    top_p = torch.tensor(top_p_val, device=device, dtype=dtype)

    # Build semantic logit bias: 0 for semantic tokens + im_end, -inf for all others
    vocab_size = model.config.vocab_size
    semantic_logit_bias = torch.full(
        (1, 1, vocab_size), float("-inf"), device=device, dtype=dtype
    )
    # [MODIFIED] Use config for semantic range
    semantic_logit_bias[
        0, 0, model.config.semantic_begin_id : model.config.semantic_end_id + 1
    ] = 0.0
    # [MODIFIED] Use tokenizer.get_token_id (Wrapper method)
    semantic_logit_bias[0, 0, model.tokenizer.get_token_id(IM_END_TOKEN)] = 0.0

    # Prefill: run the whole prompt in one step (no RAS window yet).
    prefill_decode = decode_one_token_ar
    first_token = prefill_decode(
        model,
        prompt.view(1, codebook_dim, -1),
        input_pos,
        temperature,
        top_p,
        top_k_val,
        semantic_logit_bias,
        audio_masks,
        audio_parts,
    )
    seq[:, T : T + 1] = first_token

    # Recreate input_pos
    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        first_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_val,
        semantic_logit_bias=semantic_logit_bias,
        audio_masks=audio_masks,
        audio_parts=audio_parts,
        decode_one_token=decode_one_token,
    )
    # Trim to prompt + what was actually generated (early stop shortens x).
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x

    # Clean up temporary variables
    del first_token, x, prompt, empty, input_pos
    return seq
  296. def init_model(checkpoint_path, device, precision, compile=False):
  297. model = DualARTransformer.from_pretrained(checkpoint_path, load_weights=True)
  298. model = model.to(device=device, dtype=precision)
  299. logger.info(f"Restored model from checkpoint")
  300. if isinstance(model, DualARTransformer):
  301. decode_one_token = decode_one_token_ar
  302. # prefill_n_tokens = decode_one_token_ar
  303. logger.info("Using DualARTransformer")
  304. else:
  305. raise ValueError("Unsupported model type")
  306. # Pre-create fixed parameter tensors to avoid runtime creation
  307. model.fixed_temperature = torch.tensor(0.7, device=device, dtype=torch.float)
  308. model.fixed_top_p = torch.tensor(0.7, device=device, dtype=torch.float)
  309. model.fixed_repetition_penalty = torch.tensor(1.5, device=device, dtype=torch.float)
  310. # Mark whether cache has been initialized
  311. model._cache_setup_done = False
  312. if compile:
  313. logger.info("Compiling function...")
  314. decode_one_token = torch.compile(
  315. decode_one_token,
  316. backend="inductor" if torch.cuda.is_available() else "aot_eager",
  317. mode="default" if torch.cuda.is_available() else None,
  318. fullgraph=True,
  319. )
  320. return model.eval(), decode_one_token
  321. @torch.inference_mode()
  322. def load_codec_model(codec_checkpoint_path, device, precision=torch.bfloat16):
  323. """Load the DAC codec model for audio encoding/decoding."""
  324. from hydra.utils import instantiate
  325. from omegaconf import OmegaConf
  326. config_path = Path(__file__).parent.parent.parent / "configs" / "modded_dac_vq.yaml"
  327. cfg = OmegaConf.load(str(config_path))
  328. codec = instantiate(cfg)
  329. state_dict = torch.load(codec_checkpoint_path, map_location="cpu")
  330. if "state_dict" in state_dict:
  331. state_dict = state_dict["state_dict"]
  332. if any("generator" in k for k in state_dict):
  333. state_dict = {
  334. k.replace("generator.", ""): v
  335. for k, v in state_dict.items()
  336. if "generator." in k
  337. }
  338. codec.load_state_dict(state_dict, strict=False)
  339. codec.eval()
  340. codec.to(device=device, dtype=precision)
  341. return codec
  342. @torch.inference_mode()
  343. def encode_audio(audio_path, codec, device):
  344. """Encode an audio file to VQ codes."""
  345. import torchaudio
  346. wav, sr = torchaudio.load(str(audio_path))
  347. if wav.shape[0] > 1:
  348. wav = wav.mean(dim=0, keepdim=True)
  349. wav = torchaudio.functional.resample(wav.to(device), sr, codec.sample_rate)[0]
  350. # Match codec model dtype (e.g. bfloat16)
  351. model_dtype = next(codec.parameters()).dtype
  352. audios = wav[None, None].to(dtype=model_dtype) # (1, 1, T)
  353. audio_lengths = torch.tensor([len(wav)], device=device, dtype=torch.long)
  354. indices, feature_lengths = codec.encode(audios, audio_lengths)
  355. return indices[0, :, : feature_lengths[0]] # (num_codebooks, T)
  356. @torch.inference_mode()
  357. def decode_to_audio(codes, codec):
  358. """Decode VQ codes to audio waveform."""
  359. # codes: (num_codebooks, T) -> (1, num_codebooks, T)
  360. audio = codec.from_indices(codes[None])
  361. return audio[0, 0] # (T,) mono waveform
@dataclass
class GenerateResponse:
    """One event yielded by generate_long.

    action "sample" carries the generated codes plus the batch text;
    action "next" marks the end of one full sample (codes/text are None).
    """

    action: Literal["sample", "next"]
    codes: Optional[torch.Tensor] = None
    text: Optional[str] = None
  367. def split_text_by_speaker(text: str) -> list[str]:
  368. """
  369. Split text into turns based on <|speaker:X|> tags.
  370. Args:
  371. text: The full text with speaker tags
  372. Returns:
  373. List of speaker turns, each starting with <|speaker:X|>
  374. """
  375. pattern = r"(<\|speaker:\d+\|>)"
  376. parts = re.split(pattern, text)
  377. turns = []
  378. i = 0
  379. while i < len(parts):
  380. part = parts[i].strip()
  381. if re.match(pattern, part):
  382. if i + 1 < len(parts):
  383. turn = part + parts[i + 1]
  384. turns.append(turn.strip())
  385. i += 2
  386. else:
  387. turns.append(part)
  388. i += 1
  389. else:
  390. i += 1
  391. return turns
  392. def group_turns_into_batches(
  393. turns: list[str], max_speakers: int = 3, max_bytes: int = 300
  394. ) -> list[str]:
  395. """
  396. Group turns into batches based on speaker count or byte limit.
  397. Args:
  398. turns: List of speaker turns
  399. max_speakers: Maximum number of speakers per batch (default 3)
  400. max_bytes: Maximum UTF-8 bytes per batch (default 300)
  401. Returns:
  402. List of batched text strings
  403. """
  404. batches = []
  405. current_batch = []
  406. current_bytes = 0
  407. for turn in turns:
  408. turn_bytes = len(turn.encode("utf-8"))
  409. would_exceed_speakers = len(current_batch) >= max_speakers
  410. would_exceed_bytes = current_bytes + turn_bytes > max_bytes and current_batch
  411. if would_exceed_speakers or would_exceed_bytes:
  412. batches.append("\n".join(current_batch))
  413. current_batch = [turn]
  414. current_bytes = turn_bytes
  415. else:
  416. current_batch.append(turn)
  417. current_bytes += turn_bytes
  418. if current_batch:
  419. batches.append("\n".join(current_batch))
  420. return batches
def generate_long(
    *,
    model,
    device: Union[str, torch.device],
    decode_one_token: Callable,
    text: str,
    num_samples: int = 1,
    max_new_tokens: int = 0,
    top_p: float = 0.9,
    top_k: int = 30,
    repetition_penalty: float = 1.1,
    temperature: float = 1.0,
    compile: bool = False,
    iterative_prompt: bool = True,
    chunk_length: int = 512,
    prompt_text: Optional[Union[str, list[str]]] = None,
    prompt_tokens: Optional[Union[torch.Tensor, list[torch.Tensor]]] = None,
):
    """Generate speech codes for `text`, yielding per-batch results.

    The text is split into speaker turns, grouped into byte-limited batches,
    and generated batch by batch; generated codes are appended back to the
    running conversation so later batches keep the earlier audio in context.

    Yields:
        GenerateResponse(action="sample", codes=..., text=...) for each batch,
        then GenerateResponse(action="next") after each complete sample.

    NOTE(review): `repetition_penalty` and `iterative_prompt` are accepted
    but never used in this function body.
    """
    assert 0 < top_p <= 1, "top_p must be in (0, 1]"
    assert 0 < temperature < 2, "temperature must be in (0, 2)"

    # Reference prompting is active only when BOTH text and tokens are given.
    use_prompt = bool(prompt_text) and bool(prompt_tokens)
    if use_prompt and isinstance(prompt_text, str):
        prompt_text = [prompt_text]
        prompt_tokens = [prompt_tokens]
    if use_prompt:
        assert len(prompt_text) == len(
            prompt_tokens
        ), "Prompt text and tokens must have the same length"
    if prompt_tokens:
        prompt_tokens = [i.cpu() for i in prompt_tokens]

    model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    tokenizer = model.tokenizer
    max_length = model.config.max_seq_len

    # Build base conversation with system message
    base_conversation = Conversation()
    if use_prompt:
        # Auto-add speaker tags to prompt texts that don't have them
        tagged_prompt_text = []
        for i, t in enumerate(prompt_text):
            if not re.search(r"<\|speaker:\d+\|>", t):
                tagged_prompt_text.append(f"<|speaker:{i}|>{t}")
            else:
                tagged_prompt_text.append(t)
        system_parts = [
            TextPart(
                text="convert the provided text to speech reference to the following:\n\nText:\n",
                cal_loss=False,
            ),
        ]
        reference_text = "\n".join(tagged_prompt_text)
        system_parts.append(TextPart(text=reference_text, cal_loss=False))
        system_parts.append(TextPart(text="\n\nSpeech:\n", cal_loss=False))
        # All reference codes are concatenated along time into one VQ part.
        all_codes = torch.cat([c for c in prompt_tokens], dim=1)
        system_parts.append(VQPart(codes=all_codes, cal_loss=False))
        # torch.save(all_codes, "debug_vq_codes.pt")
    else:
        system_parts = [
            TextPart(text="convert the provided text to speech", cal_loss=False)
        ]
    base_conversation.append(
        Message(
            role="system",
            parts=system_parts,
            cal_loss=False,
            add_im_start=True,
            add_im_end=True,
        )
    )

    # Split text by speaker and group into batches
    turns = split_text_by_speaker(text)
    if turns:
        batches = group_turns_into_batches(
            turns, max_speakers=5, max_bytes=chunk_length
        )
    else:
        # No speaker tags: treat the whole text as a single batch.
        batches = [text]
    logger.info(f"Split into {len(turns)} turns, grouped into {len(batches)} batches")

    for sample_idx in range(num_samples):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t0 = time.perf_counter()

        # Deep copy base conversation for this sample
        conversation = deepcopy(base_conversation)

        for batch_idx, batch_text in enumerate(batches):
            logger.info(
                f"--- Sample {sample_idx}, Batch {batch_idx} "
                f"({len(batch_text.encode('utf-8'))} bytes) ---"
            )
            logger.info(f"Batch text: {batch_text}")

            # Add user message
            conversation.append(
                Message(
                    role="user",
                    parts=[TextPart(text=batch_text, cal_loss=False)],
                    cal_loss=False,
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            # Deep copy for generation (don't pollute original conversation)
            conversation_gen = deepcopy(conversation)
            conversation_gen.append(
                Message(
                    role="assistant",
                    parts=[],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=False,
                )
            )

            logger.info("Visualizing prompt structure:")
            conversation_gen.visualize(
                tokenizer,
                merge_audio_tokens=True,
                merge_semantic_tokens=True,
            )

            encoded, audio_masks, audio_parts = conversation_gen.encode_for_inference(
                tokenizer, num_codebooks=model.config.num_codebooks
            )
            logger.info(f"Encoded prompt shape: {encoded.shape}")
            if audio_parts is not None:
                logger.info(f"Audio parts shape: {audio_parts.shape}")
            if audio_masks is not None:
                logger.info(
                    f"Audio masks non-zero count: {torch.count_nonzero(audio_masks)}"
                )

            # Keep 2048 positions of headroom for generation within the cache.
            if encoded.size(1) > max_length - 2048:
                raise ValueError(
                    f"Prompt is too long: {encoded.size(1)} > {max_length - 2048}"
                )
            encoded = encoded.to(device=device)
            prompt_length = encoded.size(1)

            y = generate(
                model=model,
                prompt=encoded,
                max_new_tokens=max_new_tokens,
                audio_masks=audio_masks,
                audio_parts=audio_parts,
                decode_one_token=decode_one_token,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
            )

            if sample_idx == 0 and batch_idx == 0 and compile:
                logger.info(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            t_batch = time.perf_counter() - t0
            tokens_generated = y.size(1) - prompt_length
            tokens_sec = tokens_generated / t_batch if t_batch > 0 else 0
            logger.info(
                f"Batch {batch_idx}: Generated {tokens_generated} tokens in "
                f"{t_batch:.02f} seconds, {tokens_sec:.02f} tokens/sec"
            )
            logger.info(
                f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
            )

            # Extract generated codes: drop the text row (0) and the trailing
            # im_end position.
            codes = y[1:, prompt_length:-1].clone()
            assert (codes >= 0).all(), f"Negative code found: {codes}"

            # Add assistant message with generated codes back to conversation
            conversation.append(
                Message(
                    role="assistant",
                    parts=[VQPart(codes=codes.cpu(), cal_loss=False)],
                    cal_loss=False,
                    modality="voice",
                    add_im_start=True,
                    add_im_end=True,
                )
            )

            yield GenerateResponse(action="sample", codes=codes, text=batch_text)

            # Cleanup
            del y, encoded

        if torch.cuda.is_available():
            logger.info(
                f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
            )
        yield GenerateResponse(action="next")
@dataclass
class WrappedGenerateResponse:
    """Queue envelope: a GenerateResponse on success, the Exception on error."""

    status: Literal["success", "error"]
    # GenerateResponse when status == "success", the raised Exception otherwise.
    response: Optional[Union[GenerateResponse, Exception]] = None
@dataclass
class GenerateRequest:
    """A generate_long kwargs dict plus the queue its responses stream into."""

    # Keyword arguments forwarded to generate_long (model/decode fn are added
    # by the worker).
    request: dict
    response_queue: queue.Queue
def launch_thread_safe_queue(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
):
    """Start a daemon worker thread owning the model; return its input queue.

    The worker loads the model, sets up caches, then serves GenerateRequest
    items: each generate_long chunk is wrapped in a WrappedGenerateResponse
    and pushed to the request's own response_queue. Enqueue None to stop the
    worker. Blocks until model initialization completes.
    """
    input_queue = queue.Queue()
    init_event = threading.Event()

    def worker():
        # The model lives entirely on this thread.
        model, decode_one_token = init_model(
            checkpoint_path, device, precision, compile=compile
        )
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        init_event.set()

        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:  # sentinel: shut down the worker
                break

            kwargs = item.request
            response_queue = item.response_queue

            try:
                for chunk in generate_long(
                    model=model, decode_one_token=decode_one_token, **kwargs
                ):
                    response_queue.put(
                        WrappedGenerateResponse(status="success", response=chunk)
                    )
                # Only clear cache after complete request batch
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
            except Exception as e:
                logger.error(traceback.format_exc())
                response_queue.put(WrappedGenerateResponse(status="error", response=e))
                # Clear cache on error
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    threading.Thread(target=worker, daemon=True).start()
    init_event.wait()
    return input_queue
@click.command()
@click.option(
    "--text",
    type=str,
    default="<|speaker:0|>你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.",
)
@click.option("--prompt-text", type=str, default=None, multiple=True)
@click.option(
    "--prompt-tokens",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--prompt-audio",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option("--output", type=click.Path(path_type=Path), default=None)
@click.option("--num-samples", type=int, default=1)
@click.option("--max-new-tokens", type=int, default=0)
@click.option("--top-p", type=float, default=0.9)
@click.option("--top-k", type=int, default=30)
@click.option("--temperature", type=float, default=1.0)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="checkpoints/s2-pro",
)
@click.option("--device", type=str, default="cuda")
@click.option("--compile/--no-compile", default=False)
@click.option("--seed", type=int, default=42)
@click.option("--half/--no-half", default=False)
@click.option("--iterative-prompt/--no-iterative-prompt", default=True)
@click.option("--chunk-length", type=int, default=300)
@click.option("--output-dir", type=Path, default="output")
def main(
    text: str,
    prompt_text: Optional[tuple[str, ...]],
    prompt_tokens: Optional[tuple[Path, ...]],
    prompt_audio: Optional[tuple[Path, ...]],
    output: Optional[Path],
    num_samples: int,
    max_new_tokens: int,
    top_p: float,
    top_k: int,
    temperature: float,
    checkpoint_path: Path,
    device: str,
    compile: bool,
    seed: int,
    half: bool,
    iterative_prompt: bool,
    chunk_length: int,
    output_dir: Path,
) -> None:
    """CLI entry point: generate VQ codes (and optionally a wav) from text."""
    os.makedirs(output_dir, exist_ok=True)
    precision = torch.half if half else torch.bfloat16

    # Validate prompt-option combinations before loading anything heavy.
    if prompt_text and not prompt_audio and not prompt_tokens:
        raise ValueError(
            "--prompt-text requires either --prompt-audio or --prompt-tokens"
        )
    if prompt_text and prompt_tokens and len(prompt_text) != len(prompt_tokens):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt tokens ({len(prompt_tokens)}) should be the same"
        )
    if prompt_text and prompt_audio and len(prompt_text) != len(prompt_audio):
        raise ValueError(
            f"Number of prompt text ({len(prompt_text)}) and prompt audio ({len(prompt_audio)}) should be the same"
        )

    logger.info("Loading model ...")
    t0 = time.time()
    model, decode_one_token = init_model(
        checkpoint_path, device, precision, compile=compile
    )
    with torch.device(device):
        model.setup_caches(
            max_batch_size=1,
            max_seq_len=model.config.max_seq_len,
            dtype=next(model.parameters()).dtype,
        )
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")

    # Codec is loaded lazily — only when audio encoding/decoding is needed.
    codec = None
    codec_checkpoint = checkpoint_path / "codec.pth"

    # Handle prompt: --prompt-audio takes priority over --prompt-tokens
    prompt_tokens_list = None
    if prompt_audio:
        logger.info("Loading codec model for audio encoding...")
        codec = load_codec_model(codec_checkpoint, device, precision)
        prompt_tokens_list = [
            encode_audio(p, codec, device).cpu() for p in prompt_audio
        ]
        logger.info(f"Encoded {len(prompt_audio)} audio file(s) to VQ codes")
    elif prompt_tokens is not None:
        # NOTE(review): click's multiple=True yields (), never None, so this
        # branch always runs; harmless because an empty list disables
        # prompting downstream, but `elif prompt_tokens:` would say what is
        # meant — confirm and simplify.
        prompt_tokens_list = [torch.from_numpy(np.load(p)) for p in prompt_tokens]

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    generator = generate_long(
        model=model,
        device=device,
        decode_one_token=decode_one_token,
        text=text,
        num_samples=num_samples,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        compile=compile,
        iterative_prompt=iterative_prompt,
        chunk_length=chunk_length,
        prompt_text=list(prompt_text) if prompt_text else None,
        prompt_tokens=prompt_tokens_list,
    )

    idx = 0
    codes = []
    for response in generator:
        if response.action == "sample":
            codes.append(response.codes)
            logger.info(f"Sampled text: {response.text}")
        elif response.action == "next":
            if codes:
                merged_codes = torch.cat(codes, dim=1)
                codes_npy_path = os.path.join(output_dir, f"codes_{idx}.npy")
                np.save(codes_npy_path, merged_codes.cpu().numpy())
                logger.info(f"Saved codes to {codes_npy_path}")
                # Decode to wav if --output is specified
                if output:
                    if codec is None:
                        logger.info("Loading codec model for audio decoding...")
                        codec = load_codec_model(codec_checkpoint, device, precision)
                    audio = decode_to_audio(merged_codes.to(device), codec)
                    import soundfile as sf
                    # With multiple samples, suffix the output stem per sample.
                    out_path = (
                        str(output)
                        if num_samples == 1
                        else str(output.with_stem(f"{output.stem}_{idx}"))
                    )
                    sf.write(out_path, audio.cpu().float().numpy(), codec.sample_rate)
                    logger.info(f"Saved audio to {out_path}")
            logger.info(f"Next sample")
            codes = []
            idx += 1
        else:
            logger.error(f"Error: {response}")
# Script entry point (click parses CLI arguments inside main()).
if __name__ == "__main__":
    main()