# lit_module.py

import itertools
from typing import Any, Callable

import lightning as L
import torch
import torch.nn.functional as F
import wandb
from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
from matplotlib import pyplot as plt
from torch import nn
from vector_quantize_pytorch import ResidualLFQ

from fish_speech.models.vqgan.losses import (
    discriminator_loss,
    feature_loss,
    generator_loss,
    kl_loss_normal,
)
from fish_speech.models.vqgan.modules.discriminator import EnsembleDiscriminator
from fish_speech.models.vqgan.modules.models import SynthesizerTrn
from fish_speech.models.vqgan.utils import plot_mel, sequence_mask, slice_segments


class VQGAN(L.LightningModule):
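    """GAN-style speech synthesizer trained with Lightning.

    Wraps a SynthesizerTrn generator and an EnsembleDiscriminator and drives
    both via manual optimization (two optimizers, two per-step LR schedulers).
    `optimizer` and `lr_scheduler` are builder callables; judging from how
    they are invoked in configure_optimizers, something like
    `functools.partial(torch.optim.AdamW, lr=...)` is the expected usage,
    though that exact pattern is an assumption.
    """
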
    def __init__(
        self,
        optimizer: Callable,
        lr_scheduler: Callable,
        generator: SynthesizerTrn,
        discriminator: EnsembleDiscriminator,
        mel_transform: nn.Module,
        segment_size: int = 20480,
        hop_length: int = 640,
        sample_rate: int = 32000,
    ):
        super().__init__()

        # Optimizer and scheduler builders, invoked in configure_optimizers
        self.optimizer_builder = optimizer
        self.lr_scheduler_builder = lr_scheduler

        # Generator and discriminators
        self.generator = generator
        self.discriminator = discriminator
        self.mel_transform = mel_transform

        # Crop length for saving memory
        self.segment_size = segment_size
        self.hop_length = hop_length
        self.sampling_rate = sample_rate

        # Disable automatic optimization: the generator and discriminator
        # optimizers are stepped by hand in training_step
        self.automatic_optimization = False

    def configure_optimizers(self):
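        """Build one optimizer/scheduler pair each for G and D.

        Note: with automatic optimization disabled, Lightning does not step
        these schedulers itself; training_step calls `scheduler.step()`
        manually, so the "interval" entries below are informational.
        """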
        # Need two optimizers and two schedulers
        optimizer_generator = self.optimizer_builder(self.generator.parameters())
        optimizer_discriminator = self.optimizer_builder(
            self.discriminator.parameters()
        )
        lr_scheduler_generator = self.lr_scheduler_builder(optimizer_generator)
        lr_scheduler_discriminator = self.lr_scheduler_builder(optimizer_discriminator)

        return (
            {
                "optimizer": optimizer_generator,
                "lr_scheduler": {
                    "scheduler": lr_scheduler_generator,
                    "interval": "step",
                    "name": "optimizer/generator",
                },
            },
            {
                "optimizer": optimizer_discriminator,
                "lr_scheduler": {
                    "scheduler": lr_scheduler_discriminator,
                    "interval": "step",
                    "name": "optimizer/discriminator",
                },
            },
        )

    def training_step(self, batch, batch_idx):
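        """One GAN step: update D on detached fakes, then update G.

        Standard two-pass adversarial training: the discriminator first sees
        `y_hat.detach()` so its gradients do not reach the generator, then a
        second discriminator forward (with gradients) feeds the adversarial,
        feature-matching, mel, and KL terms of the generator loss.
        """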
        optim_g, optim_d = self.optimizers()

        audios, audio_lengths = batch["audios"], batch["audio_lengths"]
        features, feature_lengths = batch["features"], batch["feature_lengths"]

        audios = audios[:, None, :].float()
        features = features.float()

        with torch.no_grad():
            gt_mels = self.mel_transform(audios)

        assert (
            gt_mels.shape[2] == features.shape[1]
        ), f"Shapes do not match: {gt_mels.shape}, {features.shape}"

        # Mel frames align with feature frames (asserted above), so the same
        # lengths are passed for both the feature and mel inputs
        (
            y_hat,
            ids_slice,
            x_mask,
            y_mask,
            (z_q_audio, z_p),
            (m_p_text, logs_p_text),
            (m_q, logs_q),
        ) = self.generator(features, feature_lengths, gt_mels, feature_lengths)
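        # ids_slice marks the start frame of the training crop chosen inside
        # the generator. Below, the same crop is cut from the ground-truth mel
        # (in frames) and from the waveform (in samples, hence the hop_length
        # scaling) so that all losses compare aligned windows.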
        y_hat_mel = self.mel_transform(y_hat.squeeze(1))
        y_mel = slice_segments(gt_mels, ids_slice, self.segment_size // self.hop_length)
        y = slice_segments(audios, ids_slice * self.hop_length, self.segment_size)
        # Discriminator pass on real audio vs. detached fake audio
        y_d_hat_r, y_d_hat_g, _, _ = self.discriminator(y, y_hat.detach())

        # Compute the loss in fp32 even under mixed precision
        with torch.autocast(device_type=audios.device.type, enabled=False):
            loss_disc_all, _, _ = discriminator_loss(y_d_hat_r, y_d_hat_g)

        self.log(
            "train/discriminator/loss",
            loss_disc_all,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )

        optim_d.zero_grad()
        self.manual_backward(loss_disc_all)
        self.clip_gradients(
            optim_d, gradient_clip_val=1000.0, gradient_clip_algorithm="norm"
        )
        optim_d.step()
        # Second discriminator pass, this time keeping gradients for G
        y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.discriminator(y, y_hat)

        with torch.autocast(device_type=audios.device.type, enabled=False):
            loss_mel = F.l1_loss(y_mel, y_hat_mel)
            loss_adv, _ = generator_loss(y_d_hat_g)
            loss_fm = feature_loss(fmap_r, fmap_g)
            loss_kl = kl_loss_normal(
                m_q,
                logs_q,
                m_p_text,
                logs_p_text,
                x_mask,
            )
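            # Weighted sum: the mel weight of 45 matches the HiFi-GAN / VITS
            # recipe for scaling the reconstruction term against the
            # adversarial and feature-matching terms; the small KL weight
            # (0.05) appears specific to this model.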
            loss_gen_all = loss_mel * 45 + loss_fm + loss_adv + loss_kl * 0.05
        self.log(
            "train/generator/loss",
            loss_gen_all,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "train/generator/loss_mel",
            loss_mel,
            on_step=True,
            on_epoch=False,
            prog_bar=False,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "train/generator/loss_fm",
            loss_fm,
            on_step=True,
            on_epoch=False,
            prog_bar=False,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "train/generator/loss_adv",
            loss_adv,
            on_step=True,
            on_epoch=False,
            prog_bar=False,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "train/generator/loss_kl",
            loss_kl,
            on_step=True,
            on_epoch=False,
            prog_bar=False,
            logger=True,
            sync_dist=True,
        )
        # self.log(
        #     "train/generator/loss_vq",
        #     prior.loss,
        #     on_step=True,
        #     on_epoch=False,
        #     prog_bar=False,
        #     logger=True,
        #     sync_dist=True,
        # )
        optim_g.zero_grad()
        self.manual_backward(loss_gen_all)
        self.clip_gradients(
            optim_g, gradient_clip_val=1000.0, gradient_clip_algorithm="norm"
        )
        optim_g.step()

        # Manual LR schedulers: Lightning skips scheduler stepping under
        # manual optimization, so advance both schedulers once per step
        scheduler_g, scheduler_d = self.lr_schedulers()
        scheduler_g.step()
        scheduler_d.step()

    def validation_step(self, batch: Any, batch_idx: int):
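        """Log a mel L1 metric plus spectrogram and audio samples.

        Runs two decode paths per batch: `generator.infer` (synthesis from
        features) and `generator.reconstruct` (posterior resynthesis from
        ground-truth mels). The exact semantics of those two methods live in
        SynthesizerTrn; the description here follows the calls below.
        """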
  198. audios, audio_lengths = batch["audios"], batch["audio_lengths"]
  199. features, feature_lengths = batch["features"], batch["feature_lengths"]
  200. audios = audios.float()
  201. features = features.float()
  202. audios = audios[:, None, :]
  203. gt_mels = self.mel_transform(audios)
  204. assert (
  205. gt_mels.shape[2] == features.shape[1]
  206. ), f"Shapes do not match: {gt_mels.shape}, {features.shape}"
  207. fake_audios = self.generator.infer(features, feature_lengths, gt_mels)
  208. posterior_audios = self.generator.reconstruct(gt_mels, feature_lengths)
  209. fake_mels = self.mel_transform(fake_audios.squeeze(1))
  210. posterior_mels = self.mel_transform(posterior_audios.squeeze(1))
  211. min_mel_length = min(gt_mels.shape[-1], fake_mels.shape[-1])
  212. gt_mels = gt_mels[:, :, :min_mel_length]
  213. fake_mels = fake_mels[:, :, :min_mel_length]
  214. posterior_mels = posterior_mels[:, :, :min_mel_length]
  215. mel_loss = F.l1_loss(gt_mels, fake_mels)
  216. self.log(
  217. "val/mel_loss",
  218. mel_loss,
  219. on_step=False,
  220. on_epoch=True,
  221. prog_bar=True,
  222. logger=True,
  223. sync_dist=True,
  224. )
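        # Per-sample media logging: stack generated / posterior / ground-truth
        # spectrograms into one figure and attach the matching waveforms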
        for idx, (
            mel,
            gen_mel,
            post_mel,
            audio,
            gen_audio,
            post_audio,
            audio_len,
        ) in enumerate(
            zip(
                gt_mels,
                fake_mels,
                posterior_mels,
                audios,
                fake_audios,
                posterior_audios,
                audio_lengths,
            )
        ):
            mel_len = audio_len // self.hop_length

            image_mels = plot_mel(
                [
                    gen_mel[:, :mel_len],
                    post_mel[:, :mel_len],
                    mel[:, :mel_len],
                ],
                [
                    "Generated Spectrogram",
                    "Posterior Spectrogram",
                    "Ground-Truth Spectrogram",
                ],
            )
            if isinstance(self.logger, WandbLogger):
                self.logger.experiment.log(
                    {
                        "reconstruction_mel": wandb.Image(image_mels, caption="mels"),
                        "wavs": [
                            wandb.Audio(
                                audio[0, :audio_len],
                                sample_rate=self.sampling_rate,
                                caption="gt",
                            ),
                            wandb.Audio(
                                gen_audio[0, :audio_len],
                                sample_rate=self.sampling_rate,
                                caption="prediction",
                            ),
                            wandb.Audio(
                                post_audio[0, :audio_len],
                                sample_rate=self.sampling_rate,
                                caption="posterior",
                            ),
                        ],
                    },
                )
            if isinstance(self.logger, TensorBoardLogger):
                self.logger.experiment.add_figure(
                    f"sample-{idx}/mels",
                    image_mels,
                    global_step=self.global_step,
                )
                self.logger.experiment.add_audio(
                    f"sample-{idx}/wavs/gt",
                    audio[0, :audio_len],
                    self.global_step,
                    sample_rate=self.sampling_rate,
                )
                self.logger.experiment.add_audio(
                    f"sample-{idx}/wavs/prediction",
                    gen_audio[0, :audio_len],
                    self.global_step,
                    sample_rate=self.sampling_rate,
                )
                self.logger.experiment.add_audio(
                    f"sample-{idx}/wavs/posterior",
                    post_audio[0, :audio_len],
                    self.global_step,
                    sample_rate=self.sampling_rate,
                )

            # Close the figure regardless of which logger consumed it
            plt.close(image_mels)