# lit_module.py

import itertools
from typing import Any, Callable

import lightning as L
import torch
import torch.nn.functional as F
import wandb
from diffusers.schedulers import DDIMScheduler, UniPCMultistepScheduler
from diffusers.utils.torch_utils import randn_tensor
from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
from matplotlib import pyplot as plt
from torch import nn
from tqdm import tqdm

from fish_speech.models.vq_diffusion.convnext_1d import ConvNext1DModel
from fish_speech.models.vqgan.modules.encoders import (
    SpeakerEncoder,
    TextEncoder,
    VQEncoder,
)
from fish_speech.models.vqgan.utils import plot_mel, sequence_mask


class VQDiffusion(L.LightningModule):
    """Mel-spectrogram diffusion model conditioned on VQ features and a
    speaker embedding, with a frozen vocoder used to render validation audio."""

    def __init__(
        self,
        optimizer: Callable,
        lr_scheduler: Callable,
        mel_transform: nn.Module,
        vq_encoder: VQEncoder,
        speaker_encoder: SpeakerEncoder,
        text_encoder: TextEncoder,
        denoiser: ConvNext1DModel,
        vocoder: nn.Module,
        hop_length: int = 640,
        sample_rate: int = 32000,
    ):
        super().__init__()

        # Optimizer and LR scheduler builders, resolved in configure_optimizers
        self.optimizer_builder = optimizer
        self.lr_scheduler_builder = lr_scheduler

        # Mel transform and noise schedulers: DDIM for training,
        # UniPC for faster multistep sampling at validation time
        self.mel_transform = mel_transform
        self.noise_scheduler_train = DDIMScheduler(num_train_timesteps=1000)
        self.noise_scheduler_infer = UniPCMultistepScheduler(num_train_timesteps=1000)

        # Modules
        self.vq_encoder = vq_encoder
        self.speaker_encoder = speaker_encoder
        self.text_encoder = text_encoder
        self.denoiser = denoiser
        self.vocoder = vocoder
        self.hop_length = hop_length
        self.sampling_rate = sample_rate

        # Freeze the vocoder; it is only used for rendering audio, not trained
        for param in self.vocoder.parameters():
            param.requires_grad = False
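
    # Data flow, for orientation: audios -> mel_transform -> gt_mels;
    # features -> vq_encoder -> (upsampled) -> text_encoder, conditioned on a
    # speaker embedding from speaker_encoder. The denoiser learns to predict
    # the noise added to normalized gt_mels, and the frozen vocoder turns
    # sampled mels back into waveforms during validation.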

    def configure_optimizers(self):
        optimizer = self.optimizer_builder(self.parameters())
        lr_scheduler = self.lr_scheduler_builder(optimizer)

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": "step",
            },
        }
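
    # A minimal sketch of how the two builders could be supplied (hypothetical
    # hyperparameters; the real values come from the training config):
    #
    #   from functools import partial
    #   model = VQDiffusion(
    #       optimizer=partial(torch.optim.AdamW, lr=1e-4),
    #       lr_scheduler=partial(
    #           torch.optim.lr_scheduler.LambdaLR,
    #           lr_lambda=lambda step: min(1.0, step / 1000),
    #       ),
    #       ...,  # remaining modules omitted
    #   )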

    def normalize_mels(self, x):
        # x is in range [-10.1, 3.1]; map it linearly to [-1, 1]
        x_min, x_max = -10.1, 3.1
        return (x - x_min) / (x_max - x_min) * 2 - 1

    def denormalize_mels(self, x):
        # Inverse of normalize_mels: map [-1, 1] back to [-10.1, 3.1]
        x_min, x_max = -10.1, 3.1
        return (x + 1) / 2 * (x_max - x_min) + x_min
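
    # The pair above is an exact affine round trip:
    #   denormalize_mels(normalize_mels(x)) == x for any x,
    # e.g. -10.1 -> -1.0, the midpoint -3.5 -> 0.0, and 3.1 -> 1.0.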

    def training_step(self, batch, batch_idx):
        audios, audio_lengths = batch["audios"], batch["audio_lengths"]
        features, feature_lengths = batch["features"], batch["feature_lengths"]

        audios = audios.float()
        features = features.float().mT
        audios = audios[:, None, :]

        with torch.no_grad():
            gt_mels = self.mel_transform(audios)

        mel_lengths = audio_lengths // self.hop_length
        feature_masks = torch.unsqueeze(
            sequence_mask(feature_lengths, features.shape[2]), 1
        ).to(gt_mels.dtype)
        mel_masks = torch.unsqueeze(sequence_mask(mel_lengths, gt_mels.shape[2]), 1).to(
            gt_mels.dtype
        )

        speaker_features = self.speaker_encoder(gt_mels, mel_masks)
        vq_features, vq_loss = self.vq_encoder(features, feature_masks)

        # vq_features is 50 Hz; upsample it to the mel frame rate
        vq_features = F.interpolate(vq_features, size=gt_mels.shape[2], mode="nearest")
        text_features = self.text_encoder(vq_features, mel_masks, g=speaker_features)

        # Sample noise that we'll add to the mels
        normalized_gt_mels = self.normalize_mels(gt_mels)
        noise = torch.randn_like(normalized_gt_mels)

        # Sample a random timestep for each item in the batch
        timesteps = torch.randint(
            0,
            self.noise_scheduler_train.config.num_train_timesteps,
            (normalized_gt_mels.shape[0],),
            device=normalized_gt_mels.device,
        ).long()

        # Add noise to the clean mels according to the noise magnitude at each
        # timestep (this is the forward diffusion process)
        noisy_mels = self.noise_scheduler_train.add_noise(
            normalized_gt_mels, noise, timesteps
        )

        # Predict the injected noise
        model_output = self.denoiser(noisy_mels, timesteps, mel_masks, text_features)

        # Masked MSE loss: mel_masks ([B, 1, T]) broadcasts over the mel
        # channel, so the denominator counts exactly the valid elements
        noise_loss = ((model_output * mel_masks - noise * mel_masks) ** 2).sum() / (
            mel_masks.sum() * gt_mels.shape[1]
        )

        self.log(
            "train/noise_loss",
            noise_loss,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )

        self.log(
            "train/vq_loss",
            vq_loss,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )

        return noise_loss + vq_loss
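
    # Informal restatement of the objective above: with epsilon prediction,
    # add_noise computes x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
    # and the denoiser is trained to regress eps over the unmasked frames:
    #   L = ||(eps_hat - eps) * mask||^2 / (mask.sum() * n_mels)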

    def validation_step(self, batch: Any, batch_idx: int):
        audios, audio_lengths = batch["audios"], batch["audio_lengths"]
        features, feature_lengths = batch["features"], batch["feature_lengths"]

        audios = audios.float()
        features = features.float().mT
        audios = audios[:, None, :]

        gt_mels = self.mel_transform(audios)
        mel_lengths = audio_lengths // self.hop_length
        feature_masks = torch.unsqueeze(
            sequence_mask(feature_lengths, features.shape[2]), 1
        ).to(gt_mels.dtype)
        mel_masks = torch.unsqueeze(sequence_mask(mel_lengths, gt_mels.shape[2]), 1).to(
            gt_mels.dtype
        )

        speaker_features = self.speaker_encoder(gt_mels, mel_masks)
        vq_features, _ = self.vq_encoder(features, feature_masks)

        # vq_features is 50 Hz; upsample it to the mel frame rate
        vq_features = F.interpolate(vq_features, size=gt_mels.shape[2], mode="nearest")
        text_features = self.text_encoder(vq_features, mel_masks, g=speaker_features)

        # Begin sampling: start from pure noise in the normalized mel space
        sampled_mels = torch.randn_like(gt_mels)
        self.noise_scheduler_infer.set_timesteps(100)

        for t in tqdm(self.noise_scheduler_infer.timesteps):
            timesteps = torch.tensor([t], device=sampled_mels.device, dtype=torch.long)

            # 1. predict the noise (model_output)
            model_output = self.denoiser(
                sampled_mels, timesteps, mel_masks, text_features
            )

            # 2. compute the previous sample: x_t -> x_t-1
            sampled_mels = self.noise_scheduler_infer.step(
                model_output, t, sampled_mels
            ).prev_sample

        sampled_mels = self.denormalize_mels(sampled_mels)

        with torch.autocast(device_type=sampled_mels.device.type, enabled=False):
            # Run the vocoder in fp32
            fake_audios = self.vocoder.decode(sampled_mels.float())

        mel_loss = F.l1_loss(gt_mels, sampled_mels)
        self.log(
            "val/mel_loss",
            mel_loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )

        for idx, (
            mel,
            gen_mel,
            audio,
            gen_audio,
            audio_len,
        ) in enumerate(
            zip(
                gt_mels,
                sampled_mels,
                audios,
                fake_audios,
                audio_lengths,
            )
        ):
            mel_len = audio_len // self.hop_length

            image_mels = plot_mel(
                [
                    gen_mel[:, :mel_len],
                    mel[:, :mel_len],
                ],
                [
                    "Generated Spectrogram",
                    "Ground-Truth Spectrogram",
                ],
            )

            if isinstance(self.logger, WandbLogger):
                self.logger.experiment.log(
                    {
                        "reconstruction_mel": wandb.Image(image_mels, caption="mels"),
                        "wavs": [
                            wandb.Audio(
                                audio[0, :audio_len],
                                sample_rate=self.sampling_rate,
                                caption="gt",
                            ),
                            wandb.Audio(
                                gen_audio[0, :audio_len],
                                sample_rate=self.sampling_rate,
                                caption="prediction",
                            ),
                        ],
                    },
                )

            if isinstance(self.logger, TensorBoardLogger):
                self.logger.experiment.add_figure(
                    f"sample-{idx}/mels",
                    image_mels,
                    global_step=self.global_step,
                )
                self.logger.experiment.add_audio(
                    f"sample-{idx}/wavs/gt",
                    audio[0, :audio_len],
                    self.global_step,
                    sample_rate=self.sampling_rate,
                )
                self.logger.experiment.add_audio(
                    f"sample-{idx}/wavs/prediction",
                    gen_audio[0, :audio_len],
                    self.global_step,
                    sample_rate=self.sampling_rate,
                )

            plt.close(image_mels)
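

if __name__ == "__main__":
    # A minimal, self-contained sketch of the diffusion round trip used above,
    # run on dummy tensors only (no fish_speech modules, no checkpoint). The
    # shapes, the zero "denoiser" stand-in, and the 100-step budget are
    # illustrative assumptions that mirror, but are not, the real model.
    scheduler_train = DDIMScheduler(num_train_timesteps=1000)
    scheduler_infer = UniPCMultistepScheduler(num_train_timesteps=1000)

    mels = torch.rand(2, 128, 64) * 2 - 1  # fake normalized mels in [-1, 1]
    noise = torch.randn_like(mels)
    timesteps = torch.randint(0, 1000, (mels.shape[0],)).long()

    # Forward process, as in training_step: corrupt the clean mels
    noisy_mels = scheduler_train.add_noise(mels, noise, timesteps)

    # Reverse process, as in validation_step: start from pure noise and
    # iteratively denoise. A real run would call the denoiser here; a zero
    # prediction keeps the sketch runnable without the model.
    sample = torch.randn_like(mels)
    scheduler_infer.set_timesteps(100)
    for t in scheduler_infer.timesteps:
        model_output = torch.zeros_like(sample)  # placeholder denoiser output
        sample = scheduler_infer.step(model_output, t, sample).prev_sample

    print(noisy_mels.shape, sample.shape)  # both torch.Size([2, 128, 64])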