import dataclasses
import json
import math
from collections import OrderedDict
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

import torch
import torch.nn as nn
from einops import rearrange
from loguru import logger
from torch import Tensor
from torch.nn import functional as F
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.utils.checkpoint import checkpoint
from transformers import AutoTokenizer

from fish_speech.tokenizer import SEMANTIC_TOKENS, FishTokenizer
from fish_speech.utils import RankedLogger

from .lora import LoraConfig, setup_lora

log = RankedLogger(__name__, rank_zero_only=True)

def find_multiple(n: int, k: int) -> int:
    """Round n up to the nearest multiple of k."""
    if n % k == 0:
        return n
    return n + k - (n % k)
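
# A quick worked example (illustrative): 10922 % 256 == 170, so
# find_multiple(10922, 256) returns 10922 + 256 - 170 == 11008.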

@dataclass
class BaseModelArgs:
    model_type: str = "base"
    vocab_size: int = 32000
    n_layer: int = 32
    n_head: int = 32
    dim: int = 4096
    intermediate_size: int | None = None
    n_local_heads: int = -1
    head_dim: int = 64
    rope_base: float = 10000
    norm_eps: float = 1e-5
    max_seq_len: int = 2048
    dropout: float = 0.0
    tie_word_embeddings: bool = True
    attention_qkv_bias: bool = False

    # Codebook configs
    codebook_size: int = 160
    num_codebooks: int = 4

    # Gradient checkpointing
    use_gradient_checkpointing: bool = True

    # Initialize the model
    initializer_range: float = 0.02

    # Dummy vars
    is_reward_model: bool = False
    share_codebook_embeddings: bool = True
    scale_codebook_embeddings: bool = False

    def __post_init__(self):
        if self.n_local_heads == -1:
            self.n_local_heads = self.n_head

        if self.intermediate_size is None:
            hidden_dim = 4 * self.dim
            n_hidden = int(2 * hidden_dim / 3)
            self.intermediate_size = find_multiple(n_hidden, 256)

        self.head_dim = self.dim // self.n_head
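
    # Worked example of the defaults above (a sketch, not part of the model code):
    # with dim=4096, __post_init__ gives hidden_dim = 16384,
    # n_hidden = int(2 * 16384 / 3) = 10922, and find_multiple(10922, 256) = 11008,
    # the familiar LLaMA-7B intermediate size.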

    @staticmethod
    def from_pretrained(path: str):
        path = Path(path)

        if path.is_dir():
            path = path / "config.json"

        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)

        match data["model_type"]:
            case "naive":
                cls = NaiveModelArgs
            case "dual_ar":
                cls = DualARModelArgs
            case _:
                raise ValueError(f"Unknown model type: {data['model_type']}")

        return cls(**data)

    def save(self, path: str):
        with open(path, "w") as f:
            json.dump(self.__dict__, f, indent=4, sort_keys=True, ensure_ascii=False)

@dataclass
class NaiveModelArgs(BaseModelArgs):
    model_type: str = "naive"


@dataclass
class DualARModelArgs(BaseModelArgs):
    model_type: str = "dual_ar"
    n_fast_layer: int = 4
    fast_dim: int | None = None
    fast_n_head: int | None = None
    fast_n_local_heads: int | None = None
    fast_head_dim: int | None = None
    fast_intermediate_size: int | None = None
    fast_attention_qkv_bias: bool | None = None

    def __post_init__(self):
        super().__post_init__()

        self.fast_dim = self.fast_dim or self.dim
        self.fast_n_head = self.fast_n_head or self.n_head
        self.fast_n_local_heads = self.fast_n_local_heads or self.n_local_heads
        self.fast_head_dim = self.fast_head_dim or self.head_dim
        self.fast_intermediate_size = (
            self.fast_intermediate_size or self.intermediate_size
        )
        self.fast_attention_qkv_bias = (
            self.fast_attention_qkv_bias
            if self.fast_attention_qkv_bias is not None
            else self.attention_qkv_bias
        )

class KVCache(nn.Module):
    def __init__(
        self, max_batch_size, max_seq_len, n_heads, head_dim, dtype=torch.bfloat16
    ):
        super().__init__()
        cache_shape = (max_batch_size, n_heads, max_seq_len, head_dim)
        self.register_buffer("k_cache", torch.zeros(cache_shape, dtype=dtype))
        self.register_buffer("v_cache", torch.zeros(cache_shape, dtype=dtype))

    def update(self, input_pos, k_val, v_val):
        # input_pos: [S], k_val: [B, H, S, D]
        assert input_pos.shape[0] == k_val.shape[2]

        k_out = self.k_cache
        v_out = self.v_cache
        k_out[:, :, input_pos] = k_val
        v_out[:, :, input_pos] = v_val

        return k_out, v_out
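
# A minimal KVCache usage sketch (illustrative values, not from the source):
#   cache = KVCache(max_batch_size=1, max_seq_len=16, n_heads=2, head_dim=4)
#   k = v = torch.randn(1, 2, 3, 4, dtype=torch.bfloat16)  # [B, H, S, D]
#   k_out, v_out = cache.update(torch.arange(3), k, v)
#   # k_out / v_out are the full (1, 2, 16, 4) buffers with positions 0..2 filled.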

@dataclass
class TransformerForwardResult:
    token_logits: Tensor
    codebook_logits: Tensor


@dataclass
class BaseTransformerForwardResult:
    logits: Tensor
    hidden_states: Tensor

class BaseTransformer(nn.Module):
    def __init__(
        self,
        config: BaseModelArgs,
        tokenizer: FishTokenizer | AutoTokenizer,
        init_weights: bool = True,
    ) -> None:
        super().__init__()
        self.config = config
        self.tokenizer = tokenizer
        self.semantic_token_ids = [
            tokenizer.get_token_id(SEMANTIC_TOKEN) for SEMANTIC_TOKEN in SEMANTIC_TOKENS
        ]

        # Slow transformer
        self.embeddings = nn.Embedding(
            config.vocab_size,
            config.dim,
        )
        self.codebook_embeddings = nn.Embedding(
            config.codebook_size * config.num_codebooks,
            config.dim,
        )
        self.layers = nn.ModuleList(
            TransformerBlock(config, use_sdpa=True) for _ in range(config.n_layer)
        )
        self.norm = RMSNorm(config.dim, eps=config.norm_eps)

        if self.config.tie_word_embeddings is False:
            self.output = nn.Linear(
                config.dim,
                config.vocab_size,
                bias=False,
            )

        self.register_buffer(
            "freqs_cis",
            precompute_freqs_cis(
                config.max_seq_len,
                config.dim // config.n_head,
                config.rope_base,
            ),
            persistent=False,
        )
        self.register_buffer(
            "causal_mask",
            torch.tril(
                torch.ones(
                    config.max_seq_len,
                    config.max_seq_len,
                    dtype=torch.bool,
                )
            ),
            persistent=False,
        )

        # For kv cache
        self.max_batch_size = -1
        self.max_seq_len = -1

        if init_weights:
            self.apply(self._init_weights)

    def setup_caches(
        self, max_batch_size: int, max_seq_len: int, dtype: torch.dtype = torch.bfloat16
    ):
        if self.max_seq_len >= max_seq_len and self.max_batch_size >= max_batch_size:
            return

        head_dim = self.config.dim // self.config.n_head
        max_seq_len = find_multiple(max_seq_len, 8)
        self.max_seq_len = max_seq_len
        self.max_batch_size = max_batch_size

        for b in self.layers:
            b.attention.kv_cache = KVCache(
                max_batch_size,
                max_seq_len,
                self.config.n_local_heads,
                head_dim,
                dtype=dtype,
            )

    def embed(self, x: Tensor) -> Tensor:
        vocab_embeds = [self.embeddings(x[:, 0])]
        semantic_token_ids_tensor = torch.tensor(
            self.semantic_token_ids, device=x.device
        )

        for i in range(self.config.num_codebooks):
            emb = self.codebook_embeddings(x[:, i + 1] + i * self.config.codebook_size)
            # Zero out codebook embeddings wherever channel 0 is not a semantic token
            emb[~torch.isin(x[:, 0], semantic_token_ids_tensor)] = 0
            vocab_embeds.append(emb)

        x = torch.stack(vocab_embeds, dim=3)
        x = x.sum(dim=3)

        return x
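
    # Assumed input layout for embed (inferred from forward/forward_generate below):
    #   x is (B, 1 + num_codebooks, S) integer ids; channel 0 holds text tokens and
    #   channels 1..num_codebooks hold VQ codes. Offsetting codebook i by
    #   i * codebook_size lets all codebooks share one embedding table; the stacked
    #   embeddings are summed into a single (B, S, dim) tensor.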

    def forward(
        self,
        inp: Tensor,
        key_padding_mask: Optional[Tensor] = None,
    ) -> BaseTransformerForwardResult:
        seq_len = inp.size(2)

        # Here we want to merge the embeddings of the codebooks
        x = self.embed(inp)
        freqs_cis = self.freqs_cis[:seq_len]

        # Note that the causal mask here follows the definition of
        # scaled_dot_product_attention: FALSE means masked out.
        # To stay consistent, key_padding_mask uses TRUE to mask out.
        mask = None
        if key_padding_mask is not None:
            mask = self.causal_mask[None, None, :seq_len, :seq_len]  # (B, N, Q, K)
            mask = mask & key_padding_mask[:, None, None, :].logical_not()

        for layer in self.layers:
            if self.config.use_gradient_checkpointing and self.training:
                x = checkpoint(layer, x, freqs_cis, mask, use_reentrant=True)
            else:
                x = layer(x, freqs_cis, mask)

        # We got slow_out here
        slow_out = self.norm(x)

        if self.config.tie_word_embeddings:
            token_logits = F.linear(slow_out, self.embeddings.weight)
        else:
            token_logits = self.output(slow_out)

        return BaseTransformerForwardResult(
            logits=token_logits,
            hidden_states=x,
        )

    def forward_generate(
        self,
        inp: Tensor,
        input_pos: Optional[Tensor] = None,
        vq_masks: Optional[Tensor] = None,  # unused; recomputed below
        return_all: bool = False,
    ) -> BaseTransformerForwardResult:
        # This is used for generation, optimized for torch compile
        # assert (
        #     self.max_seq_len != -1 and self.max_batch_size != -1
        # ), "Please call setup_caches before forward_generate"

        embeds = []
        for i in range(self.config.num_codebooks):
            if self.config.share_codebook_embeddings:
                _tokens = inp[:, i + 1] + i * self.config.codebook_size
            else:
                _tokens = inp[:, i + 1]

            emb = self.codebook_embeddings(_tokens)
            embeds.append(emb)

        vq_embeds_sum = torch.stack(embeds, dim=1).sum(dim=1)
        # if self.config.use_codebook_mlp:
        #     vq_embeds_sum = vq_embeds_sum / self.config.num_codebooks
        #     vq_embeds_sum = self.codebook_mlp(vq_embeds_sum)

        vq_masks = (inp[:, 0] >= self.tokenizer.semantic_begin_id) & (
            inp[:, 0] <= self.tokenizer.semantic_end_id
        )

        vq_embeds_sum[~vq_masks] = 0
        x = self.embeddings(inp[:, 0]) + vq_embeds_sum

        if input_pos is None:
            input_pos = torch.arange(inp.shape[-1], device=x.device)
            max_seq_len = inp.shape[-1]
        else:
            max_seq_len = self.max_seq_len

        mask = self.causal_mask[None, None, input_pos, :max_seq_len]  # (B, N, Q, K)
        freqs_cis = self.freqs_cis[input_pos]

        for layer in self.layers:
            x = layer(x, freqs_cis, mask, input_pos=input_pos)

        # If prefill, we only calculate the logits of the last token
        if x.size(1) > 1 and not return_all:
            x = x[:, -1:]

        # We got slow_out here
        slow_out = self.norm(x)

        if self.config.is_reward_model:
            token_logits = self.score_output(slow_out)
        elif self.config.tie_word_embeddings:
            token_logits = F.linear(slow_out, self.embeddings.weight)
        else:
            token_logits = self.output(slow_out)

        return BaseTransformerForwardResult(
            logits=token_logits,
            hidden_states=x,
        )

    def _init_weights(self, module):
        std = self.config.initializer_range

        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @staticmethod
    def from_pretrained(
        path: str,
        load_weights: bool = False,
        max_length: int | None = None,
        lora_config: LoraConfig | None = None,
        rope_base: int | None = None,
        is_agent: bool = False,
    ) -> "BaseTransformer":
        config = BaseModelArgs.from_pretrained(str(path))

        if max_length is not None:
            config.max_seq_len = max_length
            log.info(f"Override max_seq_len to {max_length}")

        if rope_base is not None:
            config.rope_base = rope_base
            log.info(f"Override rope_base to {rope_base}")

        match config.model_type:
            case "naive":
                model_cls = NaiveTransformer
            case "dual_ar":
                model_cls = DualARTransformer
            case _:
                raise ValueError(f"Unknown model type: {config.model_type}")

        if is_agent:
            tokenizer = AutoTokenizer.from_pretrained(str(path))
        else:
            tokenizer_path = str(path) + "/tokenizer.tiktoken"
            tokenizer = FishTokenizer(tokenizer_path)

        log.info(f"Loading model from {path}, config: {config}")
        model = model_cls(config, tokenizer=tokenizer)

        if lora_config is not None:
            setup_lora(model, lora_config)
            log.info(f"LoRA setup: {lora_config}")

        if load_weights is False:
            log.info("Randomly initialized model")
        else:
            if "int8" in str(Path(path)):
                logger.info("Using int8 weight-only quantization!")
                from tools.llama.quantize import WeightOnlyInt8QuantHandler

                simple_quantizer = WeightOnlyInt8QuantHandler(model)
                model = simple_quantizer.convert_for_runtime()

            if "int4" in str(Path(path)):
                logger.info("Using int4 quantization!")
                path_comps = Path(path).name.split("-")
                assert path_comps[-2].startswith("g")
                groupsize = int(path_comps[-2][1:])
                from tools.llama.quantize import WeightOnlyInt4QuantHandler

                simple_quantizer = WeightOnlyInt4QuantHandler(model, groupsize)
                model = simple_quantizer.convert_for_runtime()

            weights = torch.load(
                Path(path) / "model.pth",
                map_location="cpu",
                mmap=True,
                weights_only=True,
            )

            if "state_dict" in weights:
                logger.warning(
                    "Using a TextToSemantic LightningModule checkpoint, "
                    "please make sure it is a full model, not a LoRA model."
                )
                weights = weights["state_dict"]

            if next(iter(weights.keys())).startswith("model."):
                logger.info(
                    "Removing the 'model.' prefix created by the TextToSemantic LightningModule from keys"
                )
                new_weights = OrderedDict()
                for k, v in weights.items():
                    new_weights[k.replace("model.", "")] = v
                weights = new_weights

            # Verify parameter names and shapes, since load_state_dict uses strict=False.
            for k, v in model.named_parameters():
                if k not in weights:
                    logger.warning(f"No weight for {k}")
                elif v.shape != weights[k].shape:
                    logger.warning(
                        f"Shape mismatch for {k}: {v.shape} vs {weights[k].shape}"
                    )

            err = model.load_state_dict(weights, strict=False, assign=True)
            log.info(f"Loaded weights with error: {err}")

        return model

    def save_pretrained(self, path: str, drop_lora: bool = False):
        path = Path(path)
        path.mkdir(parents=True, exist_ok=True)

        self.config.save(path / "config.json")
        state_dict = self.state_dict()

        if drop_lora:
            for key in list(state_dict.keys()):
                if "lora" not in key:
                    continue

                state_dict.pop(key)
                log.info(f"Drop LoRA parameter: {key}")

        torch.save(state_dict, path / "model.pth")
        self.tokenizer.save_pretrained(path)
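
    # A hedged round-trip sketch (directory layout taken from the two methods above;
    # the paths are placeholders, not ones shipped with this repo):
    #   model = BaseTransformer.from_pretrained("checkpoints/base", load_weights=True)
    #   model.save_pretrained("checkpoints/exported")  # writes config.json, model.pth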

class NaiveTransformer(BaseTransformer):
    def __init__(self, config: NaiveModelArgs, tokenizer: FishTokenizer) -> None:
        super().__init__(config, init_weights=False, tokenizer=tokenizer)

        self.codebook_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.codebook_output = nn.Linear(
            config.dim,
            config.codebook_size * config.num_codebooks,
            bias=False,
        )

        self.apply(self._init_weights)

    def decode(self, result: BaseTransformerForwardResult) -> TransformerForwardResult:
        token_logits = result.logits
        x = result.hidden_states

        # Codebook
        codebook_logits = self.codebook_output(self.codebook_norm(x))
        codebook_logits = rearrange(
            codebook_logits, "b n (c d) -> b n c d", c=self.config.num_codebooks
        )

        return TransformerForwardResult(
            token_logits=token_logits,
            codebook_logits=codebook_logits,
        )

    def forward(
        self,
        inp: Tensor,
        key_padding_mask: Optional[Tensor] = None,
    ) -> TransformerForwardResult:
        result = super().forward(
            inp=inp,
            key_padding_mask=key_padding_mask,
        )
        return self.decode(result)

    def forward_generate(
        self, x: Tensor, input_pos: Optional[Tensor] = None
    ) -> TransformerForwardResult:
        result = super().forward_generate(x, input_pos)
        return self.decode(result)

class DualARTransformer(BaseTransformer):
    def __init__(self, config: DualARModelArgs, tokenizer: FishTokenizer) -> None:
        super().__init__(config, init_weights=False, tokenizer=tokenizer)

        # Project to the fast dim if needed
        if config.fast_dim is not None and config.fast_dim != config.dim:
            self.fast_project_in = nn.Linear(config.dim, config.fast_dim)
        else:
            self.fast_project_in = nn.Identity()

        # Fast transformer
        self.fast_embeddings = nn.Embedding(config.codebook_size, config.fast_dim)

        # The equivalent batch size is so large that sdpa doesn't work
        override_config = dataclasses.replace(
            config,
            dim=config.fast_dim,
            n_head=config.fast_n_head,
            n_local_heads=config.fast_n_local_heads,
            head_dim=config.fast_head_dim,
            intermediate_size=config.fast_intermediate_size,
            attention_qkv_bias=config.fast_attention_qkv_bias,
        )

        self.fast_layers = nn.ModuleList(
            TransformerBlock(override_config, use_sdpa=False)
            for _ in range(config.n_fast_layer)
        )
        self.fast_norm = RMSNorm(config.fast_dim, eps=config.norm_eps)
        self.fast_output = nn.Linear(
            config.fast_dim,
            config.codebook_size,
            bias=False,
        )

        self.register_buffer(
            "fast_freqs_cis",
            precompute_freqs_cis(
                config.num_codebooks,
                config.fast_dim // config.fast_n_head,
                config.rope_base,
            ),
            persistent=False,
        )
        self.apply(self._init_weights)

    def setup_caches(
        self, max_batch_size: int, max_seq_len: int, dtype: torch.dtype = torch.bfloat16
    ):
        super().setup_caches(max_batch_size, max_seq_len, dtype)

        head_dim = self.config.fast_dim // self.config.fast_n_head

        # Fast transformer
        # The max seq len here is the number of codebooks
        for b in self.fast_layers:
            b.attention.kv_cache = KVCache(
                max_batch_size,
                self.config.num_codebooks,
                self.config.fast_n_local_heads,
                head_dim,
                dtype=dtype,
            )

    def forward(
        self,
        inp: Tensor,
        key_padding_mask: Optional[Tensor] = None,
    ) -> TransformerForwardResult:
        parent_result = super().forward(inp, key_padding_mask)
        token_logits = parent_result.logits
        x = parent_result.hidden_states
        x = self.fast_project_in(x)

        # Fast transformer
        fast_seq_len = self.config.num_codebooks
        fast_mask = self.causal_mask[
            None, None, :fast_seq_len, :fast_seq_len
        ]  # (B, N, Q, K)

        # Drop the last token and rotate left
        codebooks = inp[:, 1:-1, 1:]
        codebooks = F.pad(codebooks, (0, 1), value=0)
        codebook_embeddings = self.fast_embeddings(codebooks)
        x = torch.cat([x[:, None], codebook_embeddings], dim=1)
        b, s = x.size(0), x.size(2)
        x = rearrange(x, "b n s d -> (b s) n d")  # flatten the batch and seq_len

        # Remove the padded part
        codebooks = rearrange(codebooks, "b n s -> (b s) n")
        codebook_mask = (codebooks == 0).all(dim=-1)

        if torch.all(codebook_mask):
            # If all codebooks are padded, keep the first 8 so the model still runs
            codebook_mask[:8] = False

        x_bs, x_len = x.size(0), x.size(1)
        x = x[~codebook_mask]

        for layer in self.fast_layers:
            if self.config.use_gradient_checkpointing and self.training:
                x = checkpoint(
                    layer, x, self.fast_freqs_cis, fast_mask, use_reentrant=True
                )
            else:
                x = layer(x, self.fast_freqs_cis, fast_mask)

        # Unflatten the batch and num_codebooks
        fast_out = self.fast_norm(x)
        codebook_logits = self.fast_output(fast_out)

        # Re-pad the codebook_logits
        buffer = torch.zeros(
            x_bs,
            x_len,
            codebook_logits.size(-1),
            device=codebook_logits.device,
            dtype=codebook_logits.dtype,
        )
        buffer[~codebook_mask] = codebook_logits
        codebook_logits = buffer

        assert codebook_logits.shape[1] == self.config.num_codebooks
        codebook_logits = rearrange(
            codebook_logits,
            "(b s) n d -> b s n d",
            b=b,
            s=s,
            n=self.config.num_codebooks,
        )

        return TransformerForwardResult(
            token_logits=token_logits,
            codebook_logits=codebook_logits,
        )

    def forward_generate_fast(
        self, x: Tensor, input_pos: Optional[Tensor] = None
    ) -> Tensor:
        # Fast transformer
        x = x.view(1, 1, -1)

        fast_mask = self.causal_mask[
            None, None, input_pos, : self.config.num_codebooks
        ]  # (B, N, Q, K)

        fast_freqs_cis = self.fast_freqs_cis[input_pos]

        for layer in self.fast_layers:
            x = layer(x, fast_freqs_cis, fast_mask, input_pos=input_pos)

        # x is (1, 1, D), so only the current token is normalized and projected
        fast_out = self.fast_norm(x)
        codebook_logits = self.fast_output(fast_out)

        return codebook_logits

    def forward_generate(
        self,
        x: Tensor,
        input_pos: Optional[Tensor] = None,
        vq_masks: Optional[Tensor] = None,
    ) -> TransformerForwardResult:
        x = super().forward_generate(x, input_pos, vq_masks)
        x.hidden_states = self.fast_project_in(x.hidden_states)
        return x
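
    # Hedged sketch of the intended two-stage decode loop (the actual sampling loop
    # lives outside this file; the names below are from this class):
    #   slow = model.forward_generate(tokens, input_pos)   # next text-token logits
    #   h = slow.hidden_states                             # (B, 1, fast_dim)
    #   for i in range(model.config.num_codebooks):
    #       logits = model.forward_generate_fast(h, torch.tensor([i]))
    #       # sample codebook i, embed it via model.fast_embeddings, feed back as h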

class TransformerBlock(nn.Module):
    def __init__(self, config: BaseModelArgs, use_sdpa: bool = True) -> None:
        super().__init__()
        self.attention = Attention(config, use_sdpa=use_sdpa)
        self.feed_forward = FeedForward(config)
        self.ffn_norm = RMSNorm(config.dim, config.norm_eps)
        self.attention_norm = RMSNorm(config.dim, config.norm_eps)

    def forward(
        self, x: Tensor, freqs_cis: Tensor, mask: Tensor, input_pos: Tensor = None
    ) -> Tensor:
        h = x + self.attention(self.attention_norm(x), freqs_cis, mask, input_pos)
        out = h + self.feed_forward(self.ffn_norm(h))
        return out
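
    # The block uses the standard pre-norm residual layout:
    #   h = x + Attention(RMSNorm(x)); out = h + FeedForward(RMSNorm(h))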

class Attention(nn.Module):
    def __init__(self, config: BaseModelArgs, use_sdpa: bool = True):
        super().__init__()
        assert config.dim % config.n_head == 0

        total_head_dim = (config.n_head + 2 * config.n_local_heads) * config.head_dim

        # key, query, value projections for all heads, but in a batch
        self.wqkv = nn.Linear(
            config.dim, total_head_dim, bias=config.attention_qkv_bias
        )
        self.wo = nn.Linear(config.dim, config.dim, bias=False)
        self.kv_cache = None

        self.dropout = config.dropout
        self.n_head = config.n_head
        self.head_dim = config.head_dim
        self.n_local_heads = config.n_local_heads
        self.dim = config.dim
        self.use_sdpa = use_sdpa
        self._register_load_state_dict_pre_hook(self.load_hook)

    def load_hook(self, state_dict, prefix, *args):
        if prefix + "wq.weight" in state_dict:
            wq = state_dict.pop(prefix + "wq.weight")
            wk = state_dict.pop(prefix + "wk.weight")
            wv = state_dict.pop(prefix + "wv.weight")
            state_dict[prefix + "wqkv.weight"] = torch.cat([wq, wk, wv])

    def forward(
        self,
        x: Tensor,
        freqs_cis: Tensor,
        mask: Tensor,
        input_pos: Optional[Tensor] = None,
    ) -> Tensor:
        bsz, seqlen, _ = x.shape

        kv_size = self.n_local_heads * self.head_dim
        q, k, v = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)

        q = q.view(bsz, seqlen, self.n_head, self.head_dim)
        k = k.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        v = v.view(bsz, seqlen, self.n_local_heads, self.head_dim)

        q = apply_rotary_emb(q, freqs_cis)
        k = apply_rotary_emb(k, freqs_cis)

        q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v))

        if self.kv_cache is not None:
            k, v = self.kv_cache.update(input_pos, k, v)

        k = k.repeat_interleave(self.n_head // self.n_local_heads, dim=1)
        v = v.repeat_interleave(self.n_head // self.n_local_heads, dim=1)

        if self.use_sdpa:
            if mask is None:
                with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
                    y = F.scaled_dot_product_attention(
                        q,
                        k,
                        v,
                        dropout_p=self.dropout if self.training else 0.0,
                        is_causal=True,
                        # No attn_mask here, so the flash attention kernel can be used
                    )
            else:
                y = F.scaled_dot_product_attention(
                    q,
                    k,
                    v,
                    attn_mask=mask,
                    dropout_p=self.dropout if self.training else 0.0,
                )
        else:
            y = self.eq_scaled_dot_product_attention(
                q,
                k,
                v,
                attn_mask=mask,
                dropout_p=self.dropout if self.training else 0.0,
            )

        y = y.transpose(1, 2).contiguous().view(bsz, seqlen, self.dim)

        return self.wo(y)

    def eq_scaled_dot_product_attention(
        self,
        query,
        key,
        value,
        attn_mask=None,
        dropout_p=0.0,
    ) -> torch.Tensor:
        # This is a standard scaled dot product attention.
        # It is less efficient, but it doesn't raise a CUDA error.
        L, S = query.size(-2), key.size(-2)
        scale_factor = 1 / math.sqrt(query.size(-1))
        attn_bias = torch.zeros(1, 1, L, S, dtype=query.dtype, device=query.device)

        if attn_mask is not None:
            if attn_mask.dtype == torch.bool:
                attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
            else:
                attn_bias += attn_mask

        attn_weight = query @ key.transpose(-2, -1) * scale_factor
        attn_weight += attn_bias
        attn_weight = torch.softmax(attn_weight, dim=-1)
        attn_weight = torch.dropout(attn_weight, dropout_p, train=True)

        return attn_weight @ value
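
    # Equivalence sketch (illustrative): with attn_mask=None and dropout_p=0.0 this
    # fallback should match the fused kernel up to numerics:
    #   q = k = v = torch.randn(1, 2, 5, 4)
    #   ref = F.scaled_dot_product_attention(q, k, v)
    #   out = attn.eq_scaled_dot_product_attention(q, k, v)  # attn: an Attention
    #   torch.testing.assert_close(out, ref)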

class FeedForward(nn.Module):
    def __init__(self, config: BaseModelArgs) -> None:
        super().__init__()
        self.w1 = nn.Linear(config.dim, config.intermediate_size, bias=False)
        self.w3 = nn.Linear(config.dim, config.intermediate_size, bias=False)
        self.w2 = nn.Linear(config.intermediate_size, config.dim, bias=False)

    def forward(self, x: Tensor) -> Tensor:
        # SwiGLU: w2(silu(w1(x)) * w3(x))
        return self.w2(F.silu(self.w1(x)) * self.w3(x))

class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)

    def forward(self, x: Tensor) -> Tensor:
        output = self._norm(x.float()).type_as(x)
        return output * self.weight
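
# RMSNorm in one line: y = x / sqrt(mean(x**2, dim=-1) + eps) * weight,
# computed in float32 and cast back to the input dtype.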

def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000) -> Tensor:
    freqs = 1.0 / (
        base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem)
    )
    t = torch.arange(seq_len, device=freqs.device)
    freqs = torch.outer(t, freqs)
    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
    cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1)
    return cache.to(dtype=torch.bfloat16)
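
# Shape note (derived from the code above): the returned cache is
# (seq_len, n_elem // 2, 2) in bfloat16, holding the (cos, sin) pair
# for each position and frequency.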

def apply_rotary_emb(x: Tensor, freqs_cis: Tensor) -> Tensor:
    xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
    freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
    x_out2 = torch.stack(
        [
            xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
            xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
        ],
        -1,
    )
    x_out2 = x_out2.flatten(3)
    return x_out2.type_as(x)
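
# A minimal end-to-end RoPE sketch (illustrative shapes, mirroring Attention.forward):
#   freqs = precompute_freqs_cis(seq_len=8, n_elem=4)  # (8, 2, 2)
#   q = torch.randn(1, 8, 2, 4)                        # (B, S, H, head_dim)
#   q_rot = apply_rotary_emb(q, freqs)                 # same shape, positions encoded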