llama.py 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038
  1. import dataclasses
  2. import json
  3. import math
  4. from collections import OrderedDict
  5. from dataclasses import dataclass
  6. from pathlib import Path
  7. from typing import Optional
  8. import torch
  9. import torch.nn as nn
  10. from einops import rearrange
  11. from loguru import logger
  12. from torch import Tensor
  13. from torch.nn import functional as F
  14. from torch.nn.attention import SDPBackend, sdpa_kernel
  15. from torch.utils.checkpoint import checkpoint
  16. from fish_speech.models.text2semantic.lora import LoraConfig, setup_lora
  17. def find_multiple(n: int, k: int) -> int:
  18. if n % k == 0:
  19. return n
  20. return n + k - (n % k)
@dataclass
class BaseModelArgs:
    """Configuration shared by all text2semantic transformer variants.

    Holds the slow-transformer hyperparameters, the codebook (audio VQ)
    settings, and loading helpers that parse a ``config.json`` into the
    right dataclass subtype.
    """

    model_type: str = "base"

    vocab_size: int = 32000
    n_layer: int = 32
    n_head: int = 32
    dim: int = 4096
    # Derived in __post_init__ (SwiGLU sizing rounded to a multiple of 256)
    # when left as None.
    intermediate_size: Optional[int] = None
    # Number of KV heads for GQA; -1 means "same as n_head" (full MHA).
    n_local_heads: int = -1
    # May be passed as None; falls back to dim // n_head in __post_init__.
    head_dim: Optional[int] = 64
    rope_base: float = 10000
    norm_eps: float = 1e-5
    max_seq_len: int = 2048
    dropout: float = 0.0
    tie_word_embeddings: bool = True
    attention_qkv_bias: bool = False
    attention_o_bias: bool = False
    attention_qk_norm: bool = False

    # Codebook configs
    codebook_size: int = 160
    num_codebooks: int = 4
    # Inclusive token-id range that marks <|semantic|> tokens; injected from
    # the tokenizer by BaseTransformer.from_pretrained.
    semantic_begin_id: int = 0
    semantic_end_id: int = 0

    # Gradient checkpointing
    use_gradient_checkpointing: bool = True

    # Initialize the model
    initializer_range: float = 0.02

    # Dummy vars
    is_reward_model: bool = False
    scale_codebook_embeddings: bool = False
    audio_embed_dim: Optional[int] = None

    def __post_init__(self):
        """Fill in derived defaults left unset by the caller."""
        if self.n_local_heads == -1:
            self.n_local_heads = self.n_head

        if self.intermediate_size is None:
            # Standard SwiGLU sizing: 2/3 of 4*dim, rounded up to 256.
            hidden_dim = 4 * self.dim
            n_hidden = int(2 * hidden_dim / 3)
            self.intermediate_size = find_multiple(n_hidden, 256)

        if self.head_dim is None:
            self.head_dim = self.dim // self.n_head

    @staticmethod
    def from_pretrained(path: str):
        """Load a config from a directory (containing config.json) or a
        JSON file, dispatching on its ``model_type`` field.

        Raises ValueError for unknown model types.
        """
        path = Path(path)

        if path.is_dir():
            path = path / "config.json"

        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)

        match data["model_type"]:
            case "naive":
                cls = NaiveModelArgs
            case "dual_ar":
                cls = DualARModelArgs
            case "fish_qwen3_omni":
                # Nested HF-style config; flattened into DualARModelArgs.
                return BaseModelArgs._from_fish_qwen3_omni(data)
            case _:
                raise ValueError(f"Unknown model type: {data['model_type']}")

        # Filter out unexpected keyword arguments
        valid_keys = {f.name for f in dataclasses.fields(cls)}
        data = {k: v for k, v in data.items() if k in valid_keys}

        return cls(**data)

    @staticmethod
    def _from_fish_qwen3_omni(data: dict) -> "DualARModelArgs":
        """Flatten a nested fish_qwen3_omni config (text_config +
        audio_decoder_config) into a DualARModelArgs."""
        tc = data["text_config"]
        adc = data["audio_decoder_config"]

        flat = dict(
            model_type="dual_ar",
            vocab_size=tc["vocab_size"],
            n_layer=tc["n_layer"],
            n_head=tc["n_head"],
            n_local_heads=tc.get("n_local_heads", -1),
            head_dim=tc.get("head_dim"),
            dim=tc["dim"],
            intermediate_size=tc.get("intermediate_size"),
            rope_base=tc.get("rope_base", 10000),
            norm_eps=tc.get("norm_eps", 1e-5),
            max_seq_len=tc.get("max_seq_len", 2048),
            dropout=tc.get("dropout", 0.0),
            tie_word_embeddings=tc.get("tie_word_embeddings", True),
            attention_qkv_bias=tc.get("attention_qkv_bias", False),
            attention_o_bias=tc.get("attention_o_bias", False),
            attention_qk_norm=tc.get("attention_qk_norm", False),
            use_gradient_checkpointing=tc.get("use_gradient_checkpointing", True),
            initializer_range=tc.get("initializer_range", 0.02),
            semantic_begin_id=data.get("semantic_start_token_id", 0),
            semantic_end_id=data.get("semantic_end_token_id", 0),
            scale_codebook_embeddings=True,
            norm_fastlayer_input=True,
            audio_embed_dim=adc.get("text_dim", tc["dim"]),
            codebook_size=adc["vocab_size"],
            num_codebooks=adc["num_codebooks"],
            n_fast_layer=adc["n_layer"],
            fast_dim=adc.get("dim"),
            fast_n_head=adc.get("n_head"),
            fast_n_local_heads=adc.get("n_local_heads"),
            fast_head_dim=adc.get("head_dim"),
            fast_intermediate_size=adc.get("intermediate_size"),
            fast_attention_qkv_bias=adc.get("attention_qkv_bias"),
            fast_attention_qk_norm=adc.get("attention_qk_norm"),
            fast_attention_o_bias=adc.get("attention_o_bias"),
        )

        # Drop None values so DualARModelArgs' own defaults / __post_init__
        # fallbacks take effect instead.
        valid_keys = {f.name for f in dataclasses.fields(DualARModelArgs)}
        flat = {k: v for k, v in flat.items() if k in valid_keys and v is not None}

        return DualARModelArgs(**flat)

    def save(self, path: str):
        """Serialize this config to ``path`` as pretty-printed JSON."""
        with open(path, "w") as f:
            json.dump(self.__dict__, f, indent=4, sort_keys=True, ensure_ascii=False)
@dataclass
class NaiveModelArgs(BaseModelArgs):
    # Config for NaiveTransformer: a single (slow) transformer predicts the
    # text token and all codebooks in one pass — no fast decoder fields.
    model_type: str = "naive"
  130. @dataclass
  131. class DualARModelArgs(BaseModelArgs):
  132. model_type: str = "dual_ar"
  133. n_fast_layer: int = 4
  134. fast_dim: int | None = None
  135. fast_n_head: int | None = None
  136. fast_n_local_heads: int | None = None
  137. fast_head_dim: int | None = None
  138. fast_intermediate_size: int | None = None
  139. fast_attention_qkv_bias: bool | None = None
  140. fast_attention_qk_norm: bool | None = None
  141. fast_attention_o_bias: bool | None = None
  142. norm_fastlayer_input: bool = False
  143. def __post_init__(self):
  144. super().__post_init__()
  145. self.fast_dim = self.fast_dim or self.dim
  146. self.fast_n_head = self.fast_n_head or self.n_head
  147. self.fast_n_local_heads = self.fast_n_local_heads or self.n_local_heads
  148. self.fast_head_dim = self.fast_head_dim or self.head_dim
  149. self.fast_intermediate_size = (
  150. self.fast_intermediate_size or self.intermediate_size
  151. )
  152. self.fast_attention_qkv_bias = (
  153. self.fast_attention_qkv_bias
  154. if self.fast_attention_qkv_bias is not None
  155. else self.attention_qkv_bias
  156. )
  157. self.fast_attention_qk_norm = (
  158. self.fast_attention_qk_norm
  159. if self.fast_attention_qk_norm is not None
  160. else self.attention_qk_norm
  161. )
  162. self.fast_attention_o_bias = (
  163. self.fast_attention_o_bias
  164. if self.fast_attention_o_bias is not None
  165. else self.attention_o_bias
  166. )
  167. class KVCache(nn.Module):
  168. def __init__(
  169. self, max_batch_size, max_seq_len, n_heads, head_dim, dtype=torch.bfloat16
  170. ):
  171. super().__init__()
  172. cache_shape = (max_batch_size, n_heads, max_seq_len, head_dim)
  173. self.register_buffer("k_cache", torch.zeros(cache_shape, dtype=dtype))
  174. self.register_buffer("v_cache", torch.zeros(cache_shape, dtype=dtype))
  175. def update(self, input_pos, k_val, v_val):
  176. # input_pos: [S], k_val: [B, H, S, D]
  177. assert input_pos.shape[0] == k_val.shape[2]
  178. k_out = self.k_cache
  179. v_out = self.v_cache
  180. k_out[:, :, input_pos] = k_val
  181. v_out[:, :, input_pos] = v_val
  182. return k_out, v_out
@dataclass
class TransformerForwardResult:
    """Decoded output pair: text-token logits plus per-codebook logits."""

    # Logits over the text vocabulary.
    token_logits: Tensor
    # Logits over the codebook vocabulary, with a separate codebook axis.
    codebook_logits: Tensor
@dataclass
class BaseTransformerForwardResult:
    """Raw output of the slow transformer, before codebook decoding."""

    # Logits over the text vocabulary.
    logits: Tensor
    # Hidden states fed to the codebook head / fast transformer.
    hidden_states: Tensor
  191. def _remap_fish_qwen3_omni_keys(weights: OrderedDict) -> OrderedDict:
  192. if not any(k.startswith(("text_model.", "audio_decoder.")) for k in weights):
  193. return weights
  194. new_weights = OrderedDict()
  195. for k, v in weights.items():
  196. if k.startswith("text_model.model."):
  197. new_key = k[len("text_model.model.") :]
  198. elif k.startswith("audio_decoder."):
  199. suffix = k[len("audio_decoder.") :]
  200. new_key = (
  201. suffix
  202. if suffix.startswith("codebook_embeddings.")
  203. else "fast_" + suffix
  204. )
  205. else:
  206. new_key = k
  207. new_weights[new_key] = v
  208. return new_weights
  209. class BaseTransformer(nn.Module):
  210. def __init__(
  211. self,
  212. config: BaseModelArgs,
  213. init_weights: bool = True,
  214. ) -> None:
  215. super().__init__()
  216. self.config = config
  217. # Slow transformer
  218. self.embeddings = nn.Embedding(
  219. config.vocab_size,
  220. config.dim,
  221. )
  222. self.codebook_embeddings = nn.Embedding(
  223. config.codebook_size * config.num_codebooks,
  224. config.dim,
  225. )
  226. self.layers = nn.ModuleList(
  227. TransformerBlock(config, use_sdpa=True) for _ in range(config.n_layer)
  228. )
  229. self.norm = RMSNorm(config.dim, eps=config.norm_eps)
  230. if self.config.tie_word_embeddings is False:
  231. self.output = nn.Linear(
  232. config.dim,
  233. config.vocab_size,
  234. bias=False,
  235. )
  236. self.register_buffer(
  237. "freqs_cis",
  238. precompute_freqs_cis(
  239. config.max_seq_len,
  240. config.head_dim,
  241. config.rope_base,
  242. ),
  243. persistent=False,
  244. )
  245. self.register_buffer(
  246. "causal_mask",
  247. torch.tril(
  248. torch.ones(
  249. config.max_seq_len,
  250. config.max_seq_len,
  251. dtype=torch.bool,
  252. )
  253. ),
  254. persistent=False,
  255. )
  256. # For kv cache
  257. self.max_batch_size = -1
  258. self.max_seq_len = -1
  259. if init_weights:
  260. self.apply(self._init_weights)
  261. def setup_caches(
  262. self, max_batch_size: int, max_seq_len: int, dtype: torch.dtype = torch.bfloat16
  263. ):
  264. if self.max_seq_len >= max_seq_len and self.max_batch_size >= max_batch_size:
  265. return
  266. max_seq_len = find_multiple(max_seq_len, 8)
  267. self.max_seq_len = max_seq_len
  268. self.max_batch_size = max_batch_size
  269. for b in self.layers:
  270. b.attention.kv_cache = KVCache(
  271. max_batch_size,
  272. max_seq_len,
  273. self.config.n_local_heads,
  274. self.config.head_dim,
  275. dtype=dtype,
  276. )
  277. def embed(self, inp: Tensor) -> Tensor:
  278. embeds = []
  279. for i in range(self.config.num_codebooks):
  280. emb = self.codebook_embeddings(
  281. inp[:, i + 1] + i * self.config.codebook_size
  282. )
  283. embeds.append(emb)
  284. vq_embeds_sum = torch.stack(embeds, dim=1).sum(dim=1)
  285. is_semantic = (inp[:, 0] >= self.config.semantic_begin_id) & (
  286. inp[:, 0] <= self.config.semantic_end_id
  287. )
  288. vq_embeds_sum[~is_semantic] = 0
  289. x = self.embeddings(inp[:, 0]) + vq_embeds_sum
  290. return x
  291. def forward(
  292. self,
  293. inp: Tensor,
  294. key_padding_mask: Optional[Tensor] = None,
  295. ) -> BaseTransformerForwardResult:
  296. seq_len = inp.size(2)
  297. # Here we want to merge the embeddings of the codebooks
  298. x = self.embed(inp)
  299. freqs_cis = self.freqs_cis[:seq_len]
  300. mask = None
  301. if key_padding_mask is not None:
  302. causal = self.causal_mask[:seq_len, :seq_len]
  303. causal = rearrange(causal, "q k -> 1 1 q k")
  304. atten_mask = rearrange(key_padding_mask, "b s -> b 1 1 s")
  305. atten_mask = atten_mask.logical_not()
  306. mask = causal & atten_mask
  307. for layer in self.layers:
  308. if self.config.use_gradient_checkpointing and self.training:
  309. x = checkpoint(layer, x, freqs_cis, mask, use_reentrant=True)
  310. else:
  311. x = layer(x, freqs_cis, mask)
  312. slow_out = self.norm(x)
  313. if self.config.tie_word_embeddings:
  314. token_logits = F.linear(slow_out, self.embeddings.weight)
  315. else:
  316. token_logits = self.output(slow_out)
  317. hidden_out = (
  318. slow_out if getattr(self.config, "norm_fastlayer_input", False) else x
  319. )
  320. return BaseTransformerForwardResult(
  321. logits=token_logits,
  322. hidden_states=hidden_out,
  323. )
  324. def forward_generate(
  325. self,
  326. inp: Tensor,
  327. input_pos: Optional[Tensor] = None,
  328. audio_masks: Optional[Tensor] = None,
  329. audio_parts: Optional[Tensor] = None,
  330. return_all: bool = False,
  331. ) -> BaseTransformerForwardResult:
  332. # Embedding logic replicated from embed() for compilation compatibility
  333. embeds = []
  334. for i in range(self.config.num_codebooks):
  335. emb = self.codebook_embeddings(
  336. inp[:, i + 1] + i * self.config.codebook_size
  337. )
  338. embeds.append(emb)
  339. vq_embeds_sum = torch.stack(embeds, dim=1).sum(dim=1)
  340. vq_masks = (inp[:, 0] >= self.config.semantic_begin_id) & (
  341. inp[:, 0] <= self.config.semantic_end_id
  342. )
  343. vq_embeds_sum[~vq_masks] = 0
  344. x = self.embeddings(inp[:, 0]) + vq_embeds_sum
  345. if self.config.scale_codebook_embeddings:
  346. vq_masks_expanded = vq_masks.unsqueeze(-1).expand_as(x)
  347. x = torch.where(
  348. vq_masks_expanded, x / math.sqrt(self.config.num_codebooks + 1), x
  349. )
  350. # Audio embeddings
  351. if audio_parts is not None:
  352. # Note: This assumes self.audio_projector exists if audio_parts is used
  353. # It seems missing in init, but we keep existing logic
  354. if hasattr(self, "audio_projector"):
  355. audio_embeds = self.audio_projector(audio_parts)
  356. if self.config.scale_codebook_embeddings:
  357. x[audio_masks] = audio_embeds / math.sqrt(2)
  358. else:
  359. x[audio_masks] = audio_embeds
  360. else:
  361. logger.warning("audio_parts provided but model has no audio_projector")
  362. if input_pos is None:
  363. input_pos = torch.arange(inp.shape[-1], device=x.device)
  364. max_seq_len = inp.shape[-1]
  365. else:
  366. max_seq_len = self.max_seq_len
  367. mask = self.causal_mask[None, None, input_pos, :max_seq_len] # (B, N, Q, K)
  368. freqs_cis = self.freqs_cis[input_pos]
  369. for layer in self.layers:
  370. x = layer(x, freqs_cis, mask, input_pos=input_pos)
  371. if x.size(1) > 1 and not return_all:
  372. x = x[:, -1:]
  373. slow_out = self.norm(x)
  374. if self.config.is_reward_model:
  375. token_logits = self.score_output(slow_out)
  376. elif self.config.tie_word_embeddings:
  377. token_logits = F.linear(slow_out, self.embeddings.weight)
  378. else:
  379. token_logits = self.output(slow_out)
  380. hidden_out = (
  381. slow_out if getattr(self.config, "norm_fastlayer_input", False) else x
  382. )
  383. return BaseTransformerForwardResult(
  384. logits=token_logits,
  385. hidden_states=hidden_out,
  386. )
  387. def _init_weights(self, module):
  388. std = self.config.initializer_range
  389. if isinstance(module, nn.Linear):
  390. module.weight.data.normal_(mean=0.0, std=std)
  391. if module.bias is not None:
  392. module.bias.data.zero_()
  393. elif isinstance(module, nn.Embedding):
  394. module.weight.data.normal_(mean=0.0, std=std)
  395. if module.padding_idx is not None:
  396. module.weight.data[module.padding_idx].zero_()
  397. @staticmethod
  398. def from_pretrained(
  399. path: str,
  400. load_weights: bool = False,
  401. max_length: int | None = None,
  402. lora_config: LoraConfig | None = None,
  403. rope_base: int | None = None,
  404. ) -> "BaseTransformer":
  405. # Import wrapper locally to avoid circular dependency or global import issues
  406. from fish_speech.tokenizer import FishTokenizer
  407. config = BaseModelArgs.from_pretrained(str(path))
  408. if max_length is not None:
  409. config.max_seq_len = max_length
  410. logger.info(f"Override max_seq_len to {max_length}")
  411. if rope_base is not None:
  412. config.rope_base = rope_base
  413. logger.info(f"Override rope_base to {rope_base}")
  414. tokenizer = None
  415. try:
  416. tokenizer = FishTokenizer.from_pretrained(path)
  417. config.semantic_begin_id = tokenizer.semantic_begin_id
  418. config.semantic_end_id = tokenizer.semantic_end_id
  419. logger.info(
  420. f"Injected Semantic IDs into Config: {config.semantic_begin_id}-{config.semantic_end_id}"
  421. )
  422. except Exception as e:
  423. logger.warning(
  424. f"Failed to load tokenizer for config injection: {e}. Semantic IDs might be 0."
  425. )
  426. match config.model_type:
  427. case "naive":
  428. model_cls = NaiveTransformer
  429. case "dual_ar":
  430. model_cls = DualARTransformer
  431. case _:
  432. raise ValueError(f"Unknown model type: {config.model_type}")
  433. logger.info(f"Loading model from {path}, config: {config}")
  434. # Initialize model without passing tokenizer explicitly to __init__
  435. model = model_cls(config)
  436. # Attach tokenizer to model instance for inference convenience (optional, but good for user scripts)
  437. model.tokenizer = tokenizer
  438. if load_weights is False:
  439. logger.info("Randomly initialized model")
  440. else:
  441. if "int8" in str(Path(path)):
  442. logger.info("Using int8 weight-only quantization!")
  443. from tools.llama.quantize import WeightOnlyInt8QuantHandler
  444. simple_quantizer = WeightOnlyInt8QuantHandler(model)
  445. model = simple_quantizer.convert_for_runtime()
  446. if "int4" in str(Path(path)):
  447. logger.info("Using int4 quantization!")
  448. path_comps = path.name.split("-")
  449. assert path_comps[-2].startswith("g")
  450. groupsize = int(path_comps[-2][1:])
  451. from tools.llama.quantize import WeightOnlyInt4QuantHandler
  452. simple_quantizer = WeightOnlyInt4QuantHandler(model, groupsize)
  453. model = simple_quantizer.convert_for_runtime()
  454. path_obj = Path(path)
  455. index_json = path_obj / "model.safetensors.index.json"
  456. single_st = path_obj / "model.safetensors"
  457. pth_file = path_obj / "model.pth"
  458. if index_json.exists():
  459. logger.info("Loading sharded safetensors weights")
  460. from safetensors.torch import load_file as st_load_file
  461. with open(index_json) as f:
  462. st_index = json.load(f)
  463. shard_files = sorted(set(st_index["weight_map"].values()))
  464. weights = OrderedDict()
  465. for shard in shard_files:
  466. weights.update(st_load_file(str(path_obj / shard), device="cpu"))
  467. weights = _remap_fish_qwen3_omni_keys(weights)
  468. elif single_st.exists():
  469. logger.info("Loading single safetensors weights")
  470. from safetensors.torch import load_file as st_load_file
  471. weights = OrderedDict(st_load_file(str(single_st), device="cpu"))
  472. weights = _remap_fish_qwen3_omni_keys(weights)
  473. elif pth_file.exists():
  474. weights = torch.load(
  475. pth_file,
  476. map_location="cpu",
  477. mmap=True,
  478. weights_only=True,
  479. )
  480. if "state_dict" in weights:
  481. weights = weights["state_dict"]
  482. if weights and next(iter(weights.keys())).startswith("model."):
  483. weights = OrderedDict(
  484. (k.replace("model.", ""), v) for k, v in weights.items()
  485. )
  486. for k in list(weights.keys()):
  487. if "audio_" in k:
  488. weights.pop(k)
  489. else:
  490. raise FileNotFoundError(f"No model weights found in {path_obj}")
  491. err = model.load_state_dict(weights, strict=False, assign=True)
  492. logger.info(f"Model weights loaded - Status: {err}")
  493. if lora_config is not None:
  494. setup_lora(model, lora_config)
  495. logger.info(f"LoRA setup: {lora_config}")
  496. return model
  497. def save_pretrained(self, path: str, drop_lora: bool = False):
  498. path = Path(path)
  499. path.mkdir(parents=True, exist_ok=True)
  500. self.config.save(path / "config.json")
  501. state_dict = self.state_dict()
  502. if drop_lora:
  503. for key in list(state_dict.keys()):
  504. if "lora" not in key:
  505. continue
  506. state_dict.pop(key)
  507. torch.save(state_dict, path / "model.pth")
  508. if hasattr(self, "tokenizer"):
  509. self.tokenizer.save_pretrained(path)
  510. class NaiveTransformer(BaseTransformer):
  511. def __init__(self, config: NaiveModelArgs) -> None:
  512. super().__init__(config, init_weights=False)
  513. self.codebook_norm = RMSNorm(config.dim, eps=config.norm_eps)
  514. self.codebook_output = nn.Linear(
  515. config.dim,
  516. config.codebook_size * config.num_codebooks,
  517. bias=False,
  518. )
  519. self.apply(self._init_weights)
  520. def decode(self, result: BaseTransformerForwardResult) -> TransformerForwardResult:
  521. token_logits = result.logits
  522. x = result.hidden_states
  523. # Codebook
  524. codebook_logits = self.codebook_output(self.codebook_norm(x))
  525. codebook_logits = rearrange(
  526. codebook_logits, "b n (c d) -> b n c d", c=self.config.num_codebooks
  527. )
  528. return TransformerForwardResult(
  529. token_logits=token_logits,
  530. codebook_logits=codebook_logits,
  531. )
  532. def forward(
  533. self,
  534. inp: Tensor,
  535. key_padding_mask: Optional[Tensor] = None,
  536. ) -> TransformerForwardResult:
  537. result = super().forward(
  538. inp=inp,
  539. key_padding_mask=key_padding_mask,
  540. )
  541. return self.decode(result)
  542. def forward_generate(
  543. self, x: Tensor, input_pos: Optional[Tensor] = None
  544. ) -> TransformerForwardResult:
  545. result = super().forward_generate(x, input_pos)
  546. return self.decode(result)
class DualARTransformer(BaseTransformer):
    """Dual autoregressive transformer.

    The inherited slow transformer predicts the text-token stream; a small
    "fast" transformer then autoregressively predicts the codebooks for
    each semantic position, using the slow hidden state as its first input.
    """

    def __init__(self, config: DualARModelArgs) -> None:
        super().__init__(config, init_weights=False)

        # Project to fast dim if needed
        if config.fast_dim is not None and config.fast_dim != config.dim:
            self.fast_project_in = nn.Linear(config.dim, config.fast_dim)
        else:
            self.fast_project_in = nn.Identity()

        # Fast transformer
        self.fast_embeddings = nn.Embedding(config.codebook_size, config.fast_dim)

        # The equivalent bs is so large that sdpa doesn't work
        override_config = dataclasses.replace(
            config,
            dim=config.fast_dim,
            n_head=config.fast_n_head,
            n_local_heads=config.fast_n_local_heads,
            head_dim=config.fast_head_dim,
            intermediate_size=config.fast_intermediate_size,
            attention_qkv_bias=config.fast_attention_qkv_bias,
            attention_qk_norm=config.fast_attention_qk_norm,
            attention_o_bias=config.fast_attention_o_bias,
        )

        self.fast_layers = nn.ModuleList(
            TransformerBlock(override_config, use_sdpa=False)
            for _ in range(config.n_fast_layer)
        )
        self.fast_norm = RMSNorm(config.fast_dim, eps=config.norm_eps)
        self.fast_output = nn.Linear(
            config.fast_dim,
            config.codebook_size,
            bias=False,
        )

        # RoPE table for the fast transformer: its sequence axis is the
        # codebook index, so only num_codebooks positions are needed.
        self.register_buffer(
            "fast_freqs_cis",
            precompute_freqs_cis(
                config.num_codebooks,
                config.fast_head_dim,
                config.rope_base,
            ),
            persistent=False,
        )
        self.apply(self._init_weights)

    def setup_caches(
        self, max_batch_size: int, max_seq_len: int, dtype: torch.dtype = torch.bfloat16
    ):
        """Set up slow-transformer caches, plus per-layer fast caches whose
        sequence length is the number of codebooks."""
        super().setup_caches(max_batch_size, max_seq_len, dtype)

        # Fast transformer
        # The max seq len here is the number of codebooks
        for b in self.fast_layers:
            b.attention.kv_cache = KVCache(
                max_batch_size,
                self.config.num_codebooks,
                self.config.fast_n_local_heads,
                self.config.fast_head_dim,
                dtype=dtype,
            )

    def forward(
        self,
        inp: Tensor,
        labels: Optional[Tensor] = None,
        key_padding_mask: Optional[Tensor] = None,
        vq_parts: Optional[Tensor] = None,
        vq_masks: Optional[Tensor] = None,
        vq_require_losses: Optional[Tensor] = None,
        mel_parts: Optional[Tensor] = None,
        mel_masks: Optional[Tensor] = None,
    ) -> TransformerForwardResult:
        """Training forward pass.

        NOTE(review): despite the Optional annotation, `labels` is used
        unconditionally below and must be provided.  The vq_*/mel_* keyword
        arguments are accepted but unused in this implementation.
        """
        parent_result = super().forward(
            inp=inp,
            key_padding_mask=key_padding_mask,
        )
        token_logits = parent_result.logits
        x = parent_result.hidden_states

        # Fast transformer
        fast_seq_len = self.config.num_codebooks
        fast_mask = self.causal_mask[
            None, None, :fast_seq_len, :fast_seq_len
        ]  # (B, N, Q, K)
        fast_freqs_cis = self.fast_freqs_cis[:fast_seq_len]

        # Extract corresponding parts with labels
        token_labels = labels[:, 0]
        # [MODIFIED] Use config instead of tokenizer
        codebook_mask = (token_labels >= self.config.semantic_begin_id) & (
            token_labels <= self.config.semantic_end_id
        )

        # This gives where input token is <|semantic|>
        # Boolean gather flattens (b, s, d) -> (num_semantic, d).
        x = x[codebook_mask]

        if x.shape[0] == 0:
            # Use dummy input when no vq is required
            x = torch.zeros(
                (4, self.config.dim),
                device=x.device,
                dtype=x.dtype,
            )
            codebooks = torch.zeros(
                (x.shape[0], self.config.num_codebooks - 1),
                device=x.device,
                dtype=torch.int,
            )
        else:
            # Gather the codebook labels at semantic positions; the fast
            # transformer is conditioned on codebooks 0..n-2 to predict 1..n-1.
            all_codebooks = labels[:, 1:, :]
            all_codebooks_permuted = all_codebooks.permute(0, 2, 1)
            semantic_codebooks = all_codebooks_permuted[codebook_mask]
            codebooks = semantic_codebooks[:, :-1]

        x = self.fast_project_in(x)

        codebook_embeddings = self.fast_embeddings(codebooks)
        # Prepend the (projected) slow hidden state as the first fast token.
        x = torch.cat([x[:, None], codebook_embeddings], dim=1)

        for layer in self.fast_layers:
            if self.config.use_gradient_checkpointing and self.training:
                x = checkpoint(layer, x, fast_freqs_cis, fast_mask, use_reentrant=True)
            else:
                x = layer(x, fast_freqs_cis, fast_mask)

        # unflatten the batch and num_codebooks
        fast_out = self.fast_norm(x)
        codebook_logits = self.fast_output(fast_out)

        assert codebook_logits.shape[1] == self.config.num_codebooks

        return TransformerForwardResult(
            token_logits=token_logits,
            codebook_logits=codebook_logits,
        )

    def forward_generate_fast(
        self, x: Tensor, input_pos: Optional[Tensor] = None
    ) -> Tensor:
        """Single-step fast-transformer decode for one codebook position,
        using the fast KV caches set up in setup_caches."""
        # Fast transformer
        x = x.view(x.shape[0], 1, -1)

        fast_mask = self.causal_mask[
            None, None, input_pos, : self.config.num_codebooks
        ]  # (B, N, Q, K)

        fast_freqs_cis = self.fast_freqs_cis[input_pos]

        for layer in self.fast_layers:
            x = layer(x, fast_freqs_cis, fast_mask, input_pos=input_pos)

        # unflatten the batch and num_codebooks
        fast_out = self.fast_norm(x)  # only take the last token
        codebook_logits = self.fast_output(fast_out)

        return codebook_logits

    def forward_generate(
        self,
        x: Tensor,
        input_pos: Optional[Tensor] = None,
        audio_masks: Optional[Tensor] = None,
        audio_parts: Optional[Tensor] = None,
    ) -> TransformerForwardResult:
        """Slow-transformer decode step; hidden states are pre-projected to
        the fast dimension for subsequent forward_generate_fast calls."""
        x = super().forward_generate(x, input_pos, audio_masks, audio_parts)
        x.hidden_states = self.fast_project_in(x.hidden_states)
        return x
  692. class TransformerBlock(nn.Module):
  693. def __init__(self, config: BaseModelArgs, use_sdpa: bool = True) -> None:
  694. super().__init__()
  695. self.attention = Attention(config, use_sdpa=use_sdpa)
  696. self.feed_forward = FeedForward(config)
  697. self.ffn_norm = RMSNorm(config.dim, config.norm_eps)
  698. self.attention_norm = RMSNorm(config.dim, config.norm_eps)
  699. def forward(
  700. self, x: Tensor, freqs_cis: Tensor, mask: Tensor, input_pos: Tensor = None
  701. ) -> Tensor:
  702. h = x + self.attention(self.attention_norm(x), freqs_cis, mask, input_pos)
  703. out = h + self.feed_forward(self.ffn_norm(h))
  704. return out
  705. class Attention(nn.Module):
  706. def __init__(self, config: BaseModelArgs, use_sdpa: bool = True):
  707. super().__init__()
  708. assert config.dim % config.n_head == 0
  709. total_head_dim = (config.n_head + 2 * config.n_local_heads) * config.head_dim
  710. # key, query, value projections for all heads, but in a batch
  711. self.wqkv = nn.Linear(
  712. config.dim, total_head_dim, bias=config.attention_qkv_bias
  713. )
  714. self.wo = nn.Linear(
  715. config.n_head * config.head_dim, config.dim, bias=config.attention_o_bias
  716. )
  717. self.kv_cache = None
  718. if config.attention_qk_norm:
  719. self.q_norm = nn.RMSNorm(config.head_dim, config.norm_eps)
  720. self.k_norm = nn.RMSNorm(config.head_dim, config.norm_eps)
  721. self.dropout = config.dropout
  722. self.n_head = config.n_head
  723. self.head_dim = config.head_dim
  724. self.n_local_heads = config.n_local_heads
  725. self.dim = config.dim
  726. self.use_sdpa = use_sdpa
  727. self.attention_qk_norm = config.attention_qk_norm
  728. self.config = config
  729. self._register_load_state_dict_pre_hook(self.load_hook)
  730. def load_hook(self, state_dict, prefix, *args):
  731. if prefix + "wq.weight" in state_dict:
  732. wq = state_dict.pop(prefix + "wq.weight")
  733. wk = state_dict.pop(prefix + "wk.weight")
  734. wv = state_dict.pop(prefix + "wv.weight")
  735. state_dict[prefix + "wqkv.weight"] = torch.cat([wq, wk, wv])
  736. def forward(
  737. self,
  738. x: Tensor,
  739. freqs_cis: Tensor,
  740. mask: Tensor,
  741. input_pos: Optional[Tensor] = None,
  742. ) -> Tensor:
  743. bsz, seqlen, _ = x.shape
  744. q_size = self.n_head * self.head_dim
  745. kv_size = self.n_local_heads * self.head_dim
  746. q, k, v = self.wqkv(x).split([q_size, kv_size, kv_size], dim=-1)
  747. q = q.view(bsz, seqlen, self.n_head, self.head_dim)
  748. k = k.view(bsz, seqlen, self.n_local_heads, self.head_dim)
  749. v = v.view(bsz, seqlen, self.n_local_heads, self.head_dim)
  750. if self.attention_qk_norm:
  751. q = self.q_norm(q)
  752. k = self.k_norm(k)
  753. q = apply_rotary_emb(q, freqs_cis)
  754. k = apply_rotary_emb(k, freqs_cis)
  755. q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v))
  756. if self.kv_cache is not None:
  757. k, v = self.kv_cache.update(input_pos, k, v)
  758. k = k.repeat_interleave(self.n_head // self.n_local_heads, dim=1)
  759. v = v.repeat_interleave(self.n_head // self.n_local_heads, dim=1)
  760. if self.use_sdpa:
  761. if mask is None:
  762. with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
  763. y = F.scaled_dot_product_attention(
  764. q,
  765. k,
  766. v,
  767. dropout_p=self.dropout if self.training else 0.0,
  768. is_causal=True,
  769. # No third party attn_mask here to use flash_attention
  770. )
  771. else:
  772. y = F.scaled_dot_product_attention(
  773. q,
  774. k,
  775. v,
  776. attn_mask=mask,
  777. dropout_p=self.dropout if self.training else 0.0,
  778. )
  779. else:
  780. y = self.eq_scaled_dot_product_attention(
  781. q,
  782. k,
  783. v,
  784. attn_mask=mask,
  785. dropout_p=self.dropout if self.training else 0.0,
  786. )
  787. y = y.transpose(1, 2).contiguous().view(bsz, seqlen, q_size)
  788. return self.wo(y)
  789. def eq_scaled_dot_product_attention(
  790. self,
  791. query,
  792. key,
  793. value,
  794. attn_mask=None,
  795. dropout_p=0.0,
  796. ) -> torch.Tensor:
  797. # This is a standard scaled dot product attention
  798. # It's low efficient, but it doesn't raise cuda error
  799. L, S = query.size(-2), key.size(-2)
  800. scale_factor = 1 / math.sqrt(query.size(-1))
  801. attn_bias = torch.zeros(1, 1, L, S, dtype=query.dtype, device=query.device)
  802. if attn_mask is not None:
  803. if attn_mask.dtype == torch.bool:
  804. attn_bias = torch.where(
  805. attn_mask.logical_not(), float("-inf"), attn_bias
  806. )
  807. else:
  808. attn_bias = attn_bias + attn_mask
  809. attn_weight = query @ key.transpose(-2, -1) * scale_factor
  810. attn_weight += attn_bias
  811. attn_weight = torch.softmax(attn_weight, dim=-1)
  812. attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
  813. return attn_weight @ value
  814. class FeedForward(nn.Module):
  815. def __init__(self, config: BaseModelArgs) -> None:
  816. super().__init__()
  817. self.w1 = nn.Linear(config.dim, config.intermediate_size, bias=False)
  818. self.w3 = nn.Linear(config.dim, config.intermediate_size, bias=False)
  819. self.w2 = nn.Linear(config.intermediate_size, config.dim, bias=False)
  820. def forward(self, x: Tensor) -> Tensor:
  821. return self.w2(F.silu(self.w1(x)) * self.w3(x))
  822. class RMSNorm(nn.Module):
  823. def __init__(self, dim: int, eps: float = 1e-5):
  824. super().__init__()
  825. self.eps = eps
  826. self.weight = nn.Parameter(torch.ones(dim))
  827. def _norm(self, x):
  828. return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
  829. def forward(self, x: Tensor) -> Tensor:
  830. output = self._norm(x.float()).type_as(x)
  831. return output * self.weight
  832. def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000) -> Tensor:
  833. """
  834. Precomputes frequency tensors for complex exponentials (cis)
  835. Args:
  836. seq_len: Length of the sequence for which positional embeddings are needed.
  837. n_elem: Number of elements in the frequency tensor.
  838. base: Base value for the frequency scaling (default: 10000).
  839. Returns:
  840. A tensor containing the precomputed frequencies in real and imaginary parts (bfloat16).
  841. """
  842. freqs = 1.0 / (
  843. base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem)
  844. )
  845. t = torch.arange(seq_len, device=freqs.device)
  846. freqs = torch.outer(t, freqs)
  847. freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
  848. cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1)
  849. return cache.to(dtype=torch.bfloat16)
  850. def apply_rotary_emb(x: Tensor, freqs_cis: Tensor) -> Tensor:
  851. xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
  852. freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
  853. x_out2 = torch.stack(
  854. [
  855. xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
  856. xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
  857. ],
  858. -1,
  859. )
  860. x_out2 = x_out2.flatten(3)
  861. return x_out2.type_as(x)