import os
import time
from pathlib import Path
from typing import Optional, Tuple

import click
import numpy as np
import torch
import torch._dynamo.config
import torch._inductor.config
from hydra import compose, initialize
from hydra.utils import instantiate
from loguru import logger
from tqdm import tqdm
from transformers import AutoTokenizer

from fish_speech.text.parser import clean_text

os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True

if hasattr(torch._inductor.config, "fx_graph_cache"):
    # Experimental feature to reduce compilation times, will be on by default in future
    torch._inductor.config.fx_graph_cache = True

from fish_speech.models.text2semantic.llama import Transformer
from fish_speech.text import g2p
from fish_speech.text.symbols import pad as pad_symbol
from fish_speech.text.symbols import pu_symbols


def multinomial_sample_one_no_sync(
    probs_sort,
):  # Does multinomial sampling without a cuda synchronization
    q = torch.empty_like(probs_sort).exponential_(1)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
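
# Note: this is the exponential-race (Gumbel-max style) sampling trick -- dividing the
# probabilities by i.i.d. Exponential(1) noise and taking the argmax draws an index with
# probability proportional to `probs_sort`, while avoiding the host/device synchronization
# that `torch.multinomial` would trigger.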


def logits_to_probs(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    repetition_penalty: float = 1.0,
):
    # Apply a repetition penalty to tokens seen in the recent window
    if previous_tokens is not None and repetition_penalty != 1.0:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=0, index=previous_tokens)
        score = torch.where(
            score < 0, score * repetition_penalty, score / repetition_penalty
        )
        logits.scatter_(dim=0, index=previous_tokens, src=score)

    # Top-p (nucleus) filtering
    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(
            torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
        )
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=0, index=sorted_indices, src=sorted_indices_to_remove
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    # Top-k filtering
    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        pivot = v.select(-1, -1).unsqueeze(-1)
        logits = torch.where(logits < pivot, -float("Inf"), logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs
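
# Illustrative example (not executed at import time; values are made up):
#   probs = logits_to_probs(
#       torch.tensor([2.0, 1.0, 0.5, 0.1]), temperature=0.7, top_p=0.5, top_k=3
#   )
# Order of operations: repetition penalty -> top-p mask -> temperature -> top-k mask -> softmax.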


def sample(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    **sampling_kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
    probs = logits_to_probs(
        logits=logits[0, -1], previous_tokens=previous_tokens, **sampling_kwargs
    )
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs


def decode_one_token(
    model: Transformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    previous_tokens: Optional[torch.Tensor] = None,
    **sampling_kwargs,
) -> torch.Tensor:
    assert input_pos.shape[-1] == 1

    logits = model.forward_generate(x, input_pos)

    codebooks = [
        sample(
            logits.token_logits,
            previous_tokens=None,  # Disable repetition penalty for the token codebook
            **sampling_kwargs,
        )[0]
    ]

    # Disable <s> and </s> tokens for codebooks
    if model.config.num_codebooks != 0:
        logits.codebook_logits[:, :, :, :2] = -float("Inf")

    for i in range(model.config.num_codebooks):
        codebooks.append(
            sample(
                logits.codebook_logits[:, :, i],
                previous_tokens=(
                    previous_tokens[i + 1] if previous_tokens is not None else None
                ),
                **sampling_kwargs,
            )[0]
        )

    return torch.stack(codebooks, dim=0)
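
# The stacked result has shape [num_codebooks + 1, 1]: row 0 is sampled from
# token_logits, and rows 1..num_codebooks from the per-codebook codebook_logits.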


def prefill(
    model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs
) -> torch.Tensor:
    # input_pos: [B, S]
    logits = model.forward_generate(x, input_pos)

    codebooks = [
        sample(
            logits.token_logits,
            previous_tokens=None,
            **sampling_kwargs,
        )[0]
    ]

    # Disable <s> and </s> tokens for codebooks
    if model.config.num_codebooks != 0:
        logits.codebook_logits[:, :, :, :2] = -float("Inf")

    for i in range(model.config.num_codebooks):
        codebooks.append(
            sample(
                logits.codebook_logits[:, :, i],
                previous_tokens=None,
                **sampling_kwargs,
            )[0]
        )

    return torch.stack(codebooks, dim=0)


def decode_n_tokens(
    model: Transformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    eos_token_id: int = 2,
    **sampling_kwargs,
):
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, num_new_tokens),
        dtype=torch.int,
        device=cur_token.device,
    )

    for i in tqdm(range(num_new_tokens)):
        # We need to get windowed repeat penalty
        win_size = 16
        if i < win_size:
            window = previous_tokens[:, :win_size]
        else:
            window = previous_tokens[:, i - win_size : i]

        with torch.backends.cuda.sdp_kernel(
            enable_flash=False, enable_mem_efficient=False, enable_math=True
        ):  # Actually better for Inductor to codegen attention here
            next_token = decode_one_token(
                model,
                cur_token,
                input_pos,
                window,
                **sampling_kwargs,
            )

        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)
        previous_tokens[:, i : i + 1] = next_token.view(
            model.config.num_codebooks + 1, -1
        )

        # TODO: use tokenizer's eos
        if (cur_token[0, 0, -1] == eos_token_id).any():
            break

    return previous_tokens[:, : i + 1]
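
# Note on the repetition-penalty window: only the last `win_size` (16) generated rows are
# passed to decode_one_token, so the penalty is local rather than over the whole history.
# For the first few steps (i < win_size) the slice still spans the full window and therefore
# includes not-yet-filled zero entries.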


@torch.no_grad()
def generate(
    *,
    model: Transformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    eos_token_id: int = 2,
    **sampling_kwargs,
) -> torch.Tensor:
    """
    Takes a conditioning sequence (prompt) as input and continues to generate
    as many tokens as requested.
    """

    T = prompt.size(1)

    if max_new_tokens:
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
            logger.info(f"Truncating max_new_tokens to {max_new_tokens}")

        T_new = T + max_new_tokens
    else:
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T

    device, dtype = prompt.device, prompt.dtype
    with torch.device(device):
        model.setup_caches(max_batch_size=1, max_seq_len=T_new)

    codebook_dim = 1 + model.config.num_codebooks

    # Create an empty tensor of the expected final shape and fill in the current tokens
    empty = torch.empty((codebook_dim, T_new), dtype=dtype, device=device)
    empty[:, :T] = prompt
    seq = empty
    input_pos = torch.arange(0, T, device=device)

    # Prefill the KV caches with the full prompt and sample the first new token
    next_token = prefill(
        model, prompt.view(1, codebook_dim, -1), input_pos, **sampling_kwargs
    )
    seq[:, T : T + 1] = next_token

    # Decode the remaining tokens one position at a time
    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        next_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        eos_token_id=eos_token_id,
        **sampling_kwargs,
    )
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x

    return seq
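
# The returned tensor has shape [1 + num_codebooks, prompt_len + generated]: row 0 carries
# the text-token stream and the remaining rows carry the codebook values (stored with a +2
# offset to stay clear of the <s>/</s> ids), which main() strips back off before saving.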


def encode_tokens(
    tokenizer,
    string,
    bos=True,
    device="cuda",
    prompt_text=None,
    prompt_tokens=None,
    use_g2p=False,
    speaker=None,
):
    if prompt_text is not None:
        string = prompt_text + " " + string

    if use_g2p:
        prompt = g2p(string)
        prompt = [
            (f"<p:{i}>" if i not in pu_symbols and i != pad_symbol else i)
            for _, i in prompt
        ]
        string = " ".join(prompt)
    else:
        string = clean_text(string)

    if speaker is not None:
        string = f"[SPK: {speaker}] {string}"

    string = f"[INST] {string} [/INST]"
    tokens = tokenizer.encode(
        string,
        max_length=10**6,
        add_special_tokens=bos,
        truncation=False,
    )
    tokens = torch.tensor([tokens], dtype=torch.int, device=device)

    # Codebooks: the text span gets all-zero codebook rows
    zeros = torch.zeros((4, tokens.size(1)), dtype=torch.int, device=device)
    prompt = torch.cat((tokens, zeros), dim=0)

    if prompt_tokens is None:
        return prompt

    # Append the prompt tokens (codebook values offset by 2 to stay clear of <s>/</s>)
    assert prompt_tokens.ndim == 2
    data = prompt_tokens + 2

    zeros = (
        torch.zeros((1, data.size(1)), dtype=torch.int, device=device)
        + tokenizer.pad_token_id
    )  # 32311 is the <pad> token
    data = torch.cat((zeros, data), dim=0)
    prompt = torch.cat((prompt, data), dim=1)

    return prompt
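
# Resulting prompt layout (one column per position):
#   row 0     : text token ids for "[INST] ... [/INST]" (plus optional "[SPK: ...]" prefix)
#   rows 1..4 : zeros over the text span; if prompt_tokens is provided, its columns
#               (values + 2) are appended, with the text row filled with tokenizer.pad_token_id.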


def load_model(config_name, checkpoint_path, device, precision):
    with initialize(version_base="1.3", config_path="../../fish_speech/configs"):
        cfg = compose(config_name=config_name)

    with torch.device("meta"):
        model: Transformer = instantiate(cfg.model.model)

    if "int8" in str(checkpoint_path):
        logger.info("Using int8 weight-only quantization!")
        from quantize import WeightOnlyInt8QuantHandler

        simple_quantizer = WeightOnlyInt8QuantHandler(model)
        model = simple_quantizer.convert_for_runtime()

    if "int4" in str(checkpoint_path):
        logger.info("Using int4 quantization!")
        path_comps = checkpoint_path.name.split(".")
        assert path_comps[-2].startswith("g")
        groupsize = int(path_comps[-2][1:])
        from quantize import WeightOnlyInt4QuantHandler

        simple_quantizer = WeightOnlyInt4QuantHandler(model, groupsize)
        model = simple_quantizer.convert_for_runtime()

    checkpoint = torch.load(str(checkpoint_path), map_location="cpu")
    if "state_dict" in checkpoint:
        checkpoint = checkpoint["state_dict"]

    if any(k.startswith("model.") for k in checkpoint):
        checkpoint = {
            k.replace("model.", ""): v
            for k, v in checkpoint.items()
            if k.startswith("model.")
        }

    model.load_state_dict(checkpoint, assign=True)
    model = model.to(device=device, dtype=precision)
    logger.info("Restored model from checkpoint")

    return model.eval()
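
# Note: quantized checkpoints are selected purely by filename convention -- a path containing
# "int8" loads the int8 weight-only handler, and one containing "int4" also expects a "g<N>"
# groupsize segment before the extension (e.g. something like "model.int4.g32.pth").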


@click.command()
@click.option("--text", type=str, default="你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.")
@click.option("--prompt-text", type=str, default=None)
@click.option(
    "--prompt-tokens", type=click.Path(path_type=Path, exists=True), default=None
)
@click.option("--num-samples", type=int, default=1)
@click.option("--max_new_tokens", type=int, default=0)
@click.option("--top-k", type=int, default=None)
@click.option("--top-p", type=float, default=0.5)
@click.option("--repetition-penalty", type=float, default=1.5)
@click.option("--temperature", type=float, default=0.7)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="results/text2semantic_400m_finetune/step_000002000.pth",
)
@click.option("--config-name", type=str, default="text2semantic_finetune")
@click.option("--tokenizer", type=str, default="fishaudio/speech-lm-v1")
@click.option("--compile/--no-compile", default=False)
@click.option("--use-g2p/--no-g2p", default=True)
@click.option("--seed", type=int, default=42)
@click.option("--speaker", type=str, default=None)
def main(
    text: str,
    prompt_text: Optional[str],
    prompt_tokens: Optional[Path],
    num_samples: int,
    max_new_tokens: int,
    top_k: Optional[int],
    top_p: float,
    repetition_penalty: float,
    temperature: float,
    checkpoint_path: Path,
    config_name: str,
    tokenizer: str,
    compile: bool,
    use_g2p: bool,
    seed: int,
    speaker: Optional[str],
) -> None:
  333. device = "cuda"
  334. precision = torch.bfloat16
  335. logger.info("Loading model ...")
  336. t0 = time.time()
  337. model = load_model(config_name, checkpoint_path, device, precision)
  338. model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
  339. torch.cuda.synchronize()
  340. logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")
  341. tokenizer = AutoTokenizer.from_pretrained(tokenizer)
  342. prompt_tokens = (
  343. torch.from_numpy(np.load(prompt_tokens)).to(device)
  344. if prompt_tokens is not None
  345. else None
  346. )
  347. encoded = encode_tokens(
  348. tokenizer,
  349. text,
  350. prompt_text=prompt_text,
  351. prompt_tokens=prompt_tokens,
  352. bos=True,
  353. device=device,
  354. use_g2p=use_g2p,
  355. speaker=speaker,
  356. )
  357. prompt_length = encoded.size(1)
  358. logger.info(f"Encoded prompt shape: {encoded.shape}")
  359. torch.manual_seed(seed)
  360. if compile:
  361. global decode_one_token
  362. decode_one_token = torch.compile(
  363. decode_one_token, mode="reduce-overhead", fullgraph=True
  364. )
  365. for i in range(num_samples):
  366. torch.cuda.synchronize()
  367. t0 = time.perf_counter()
  368. y = generate(
  369. model=model,
  370. prompt=encoded,
  371. max_new_tokens=max_new_tokens,
  372. eos_token_id=tokenizer.eos_token_id,
  373. temperature=temperature,
  374. top_k=top_k,
  375. top_p=top_p,
  376. repetition_penalty=repetition_penalty,
  377. )
  378. if i == 0 and compile:
  379. logger.info(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")
  380. torch.cuda.synchronize()
  381. t = time.perf_counter() - t0
  382. tokens_generated = y.size(1) - prompt_length
  383. tokens_sec = tokens_generated / t
  384. logger.info(
  385. f"Generated {tokens_generated} tokens in {t:.02f} seconds, {tokens_sec:.02f} tokens/sec"
  386. )
  387. logger.info(f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s")
  388. logger.info(
  389. f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
  390. )
  391. codes = y[1:, prompt_length:-1]
  392. codes = codes - 2
  393. assert (codes >= 0).all(), "Codes should be >= 0"
  394. np.save(f"codes_{i}.npy", codes.cpu().numpy())
  395. logger.info(f"Saved codes to codes_{i}.npy")
  396. if __name__ == "__main__":
    main()
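
# Example invocation (illustrative; adjust the checkpoint/config paths to your setup):
#   python generate.py \
#       --text "Hello world" --no-g2p \
#       --checkpoint-path results/text2semantic_400m_finetune/step_000002000.pth \
#       --config-name text2semantic_finetune \
#       --num-samples 2 --compile
# Each sample's semantic codes are written to codes_<i>.npy.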