extract_vq.py

import os
import subprocess as sp
import sys
import time
from datetime import timedelta
from functools import lru_cache
from pathlib import Path

import click
import numpy as np
import torch
import torchaudio
from hydra import compose, initialize
from hydra.utils import instantiate
from loguru import logger
from omegaconf import OmegaConf

from tools.file import AUDIO_EXTENSIONS, list_files, load_filelist

# Register the "eval" resolver so config values can be computed from expressions
OmegaConf.register_new_resolver("eval", eval)
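
# A minimal sketch of what the "eval" resolver enables inside a Hydra/OmegaConf
# YAML config (the key names below are illustrative, not from the real configs):
#
#   hop_length: 512
#   sample_rate: 44100
#   frame_rate: ${eval:'44100 / 512'}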

# This file is used to convert audio files into VQ token sequences (.npy files)
# using the Firefly GAN VQ model.
# It's mainly used to generate the training data for the text2semantic model.

RANK = int(os.environ.get("SLURM_PROCID", 0))
WORLD_SIZE = int(os.environ.get("SLURM_NTASKS", 1))

logger_format = (
    "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
    "<level>{level: <8}</level> | "
    "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | "
    "{extra[rank]} - <level>{message}</level>"
)
logger.configure(extra={"rank": f"RANK: {RANK} / {WORLD_SIZE}"})
logger.remove()
logger.add(sys.stderr, format=logger_format)
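
# With the format above, a worker's log lines look roughly like (illustrative):
#   2024-01-01 12:00:00.000 | INFO     | __main__:main:160 | RANK: 0 / 2 - Processing 50/100 files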

@lru_cache(maxsize=1)
def get_model(
    config_name: str = "firefly_gan_vq",
    checkpoint_path: str = "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
    device: str | torch.device = "cuda",
):
    with initialize(version_base="1.3", config_path="../../fish_speech/configs"):
        cfg = compose(config_name=config_name)

    model = instantiate(cfg)
    state_dict = torch.load(
        checkpoint_path,
        map_location=device,
    )

    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]

    # Keep only the generator weights and strip their "generator." prefix
    if any("generator" in k for k in state_dict):
        state_dict = {
            k.replace("generator.", ""): v
            for k, v in state_dict.items()
            if "generator." in k
        }

    model.load_state_dict(state_dict, strict=False)
    model.eval()
    model.to(device)

    logger.info("Loaded model")
    return model
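
# A minimal usage sketch, assuming the default config and checkpoint exist
# relative to the working directory (the lru_cache means repeated calls with
# the same arguments return the same model instance):
#
#   model = get_model(device="cuda")
#   sr = model.spec_transform.sample_rate  # target sample rate for encoding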

@torch.inference_mode()
def process_batch(files: list[Path], model) -> float:
    wavs = []
    audio_lengths = []
    new_files = []
    max_length = total_time = 0

    for file in files:
        try:
            wav, sr = torchaudio.load(
                str(file), backend="sox" if sys.platform == "linux" else "soundfile"
            )  # The sox backend requires libsox-dev to be installed
        except Exception as e:
            logger.error(f"Error reading {file}: {e}")
            continue

        # Downmix multi-channel audio to mono
        if wav.shape[0] > 1:
            wav = wav.mean(dim=0, keepdim=True)

        wav = torchaudio.functional.resample(
            wav.to(model.device), sr, model.spec_transform.sample_rate
        )[0]
        total_time += len(wav) / model.spec_transform.sample_rate
        max_length = max(max_length, len(wav))

        wavs.append(wav)
        audio_lengths.append(len(wav))
        new_files.append(file)

    files = new_files
    if not wavs:
        # Every file in the batch failed to load
        return total_time

    # Pad every waveform to the longest one in the batch
    for i, wav in enumerate(wavs):
        wavs[i] = torch.nn.functional.pad(wav, (0, max_length - len(wav)), "constant")

    audios = torch.stack(wavs, dim=0)[:, None]
    audio_lengths = torch.tensor(audio_lengths, device=model.device, dtype=torch.long)

    # Encode the padded batch into VQ indices plus their valid lengths
    indices, feature_lengths = model.encode(audios, audio_lengths)

    # Save to disk
    outputs = indices.cpu().numpy()

    for file, length, feature in zip(files, feature_lengths, outputs):
        # Trim padding frames; feature has shape (num_codebooks, T)
        feature = feature[:, :length]

        with open(file.with_suffix(".npy"), "wb") as f:
            np.save(f, feature)

    return total_time
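
# The saved files can be read back directly with NumPy; each array holds one
# row of token ids per codebook, i.e. shape (num_codebooks, T). A sketch with
# a placeholder path:
#
#   import numpy as np
#   indices = np.load("data/demo/clip_0001.npy")
#   print(indices.shape)  # e.g. (8, 412) for the 8x1024 FSQ checkpoint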

@click.command()
@click.argument("folder")
@click.option("--num-workers", default=1)
@click.option("--config-name", default="firefly_gan_vq")
@click.option(
    "--checkpoint-path",
    default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
)
@click.option("--batch-size", default=64)
@click.option("--filelist", default=None, type=Path)
def main(
    folder: str,
    num_workers: int,
    config_name: str,
    checkpoint_path: str,
    batch_size: int,
    filelist: Path,
):
    if num_workers > 1 and WORLD_SIZE != num_workers:
        assert WORLD_SIZE == 1, "You should either use SLURM or this launcher, not both"

        logger.info(f"Spawning {num_workers} workers")

        if torch.cuda.is_available():
            visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
            if visible_devices is None:
                visible_devices = list(range(torch.cuda.device_count()))
            else:
                visible_devices = visible_devices.split(",")
        else:
            # Set to an empty string to avoid using the GPU
            visible_devices = [""]

        processes = []
        for i in range(num_workers):
            # Re-launch this script once per worker, pinning each to one GPU and
            # faking the SLURM env vars so the workers shard the file list
            env = os.environ.copy()
            env["CUDA_VISIBLE_DEVICES"] = str(visible_devices[i % len(visible_devices)])
            env["SLURM_PROCID"] = str(i)
            env["SLURM_NTASKS"] = str(num_workers)

            processes.append(
                sp.Popen(
                    [sys.executable] + sys.argv.copy(),
                    env=env,
                )
            )

        for p in processes:
            p.wait()

        logger.info("All workers finished")
        return
    # This is a worker
    logger.info("Starting worker")
    if filelist:
        files = [i[0] for i in load_filelist(filelist)]
    else:
        files = list_files(folder, AUDIO_EXTENSIONS, recursive=True, sort=False)

    logger.info(f"Found {len(files)} files")

    # Skip files whose features have already been extracted
    files = [Path(f) for f in files if not Path(f).with_suffix(".npy").exists()]

    total_files = len(files)
    # Round-robin shard of the remaining files for this worker
    files = files[RANK::WORLD_SIZE]
    logger.info(f"Processing {len(files)}/{total_files} files")

    # Batch processing
    total_time = 0
    begin_time = time.time()
    processed_files = 0
    model = get_model(config_name, checkpoint_path)

    for n_batch, idx in enumerate(range(0, len(files), batch_size)):
        batch = files[idx : idx + batch_size]
        batch_time = process_batch(batch, model)

        total_time += batch_time
        processed_files += len(batch)

        if (n_batch + 1) % 10 == 0:
            eta = (
                (time.time() - begin_time)
                / processed_files
                * (len(files) - processed_files)
            )
            logger.info(
                f"Processed {processed_files} files, {total_time / 3600:.2f} hours of audio, "
                + f"ETA: {timedelta(seconds=round(eta))}"
            )

    logger.info(
        f"Finished processing {len(files)} files, {total_time / 3600:.2f} hours of audio"
    )

if __name__ == "__main__":
    main()
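
# Example invocations (paths are illustrative):
#
#   # Single worker, scanning a folder recursively:
#   python tools/vqgan/extract_vq.py data/demo --batch-size 64
#
#   # Spawn two local workers, one per visible GPU:
#   python tools/vqgan/extract_vq.py data/demo --num-workers 2
#
#   # Use a filelist (audio path in the first column) instead of scanning:
#   python tools/vqgan/extract_vq.py data/demo --filelist data/demo/filelist.txt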