build_dataset.py

import os
import re
from collections import defaultdict
from multiprocessing import Pool
from pathlib import Path

import click
import numpy as np
import yaml
from loguru import logger
from tqdm import tqdm

from fish_speech.datasets.protos.text_data_pb2 import Semantics, Sentence, TextData
from fish_speech.datasets.protos.text_data_stream import pack_pb_stream
from fish_speech.text import g2p
from fish_speech.utils.file import load_filelist

# Limit BLAS thread pools to avoid CPU oversubscription across worker processes
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def task_generator_yaml(config):
    """Yield (name, files, source, languages, extension) tasks from a YAML config."""
    with open(config, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    for row in config["datasets"]:
        root, source, languages, extension, parent_level = (
            row["root"],
            row["source"],
            row["languages"],
            row["extension"],
            row["group_parent_level"],
        )

        if isinstance(parent_level, int):
            parent_level = [parent_level]

        # Load the files
        files = list(tqdm(Path(root).rglob("*.npy"), desc=f"Loading {root}"))
        files = sorted(files)

        # Group files by the names of their ancestor directories at the
        # requested parent levels (1 = immediate parent)
        grouped_files = defaultdict(list)
        for file in tqdm(files, desc=f"Grouping {root}"):
            all_parents = []
            pointer = file
            while pointer.parent.name:
                all_parents.append(pointer.parent.name)
                pointer = pointer.parent

            ps = []
            for level in parent_level:
                ps.append(all_parents[level - 1])
            p = "-".join(ps)
            grouped_files[p].append(file)

        logger.info(f"Found {len(grouped_files)} groups in {root}")
        for name, subset in grouped_files.items():
            yield name, subset, source, languages, extension
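
# For reference, a minimal sketch of the YAML layout consumed above; the keys
# mirror the row[...] lookups, but all values here are hypothetical placeholders:
#
#   datasets:
#     - root: data/my-corpus          # scanned recursively for *.npy files
#       source: my-corpus             # recorded in TextData.source
#       languages: [zh, en]           # language order handed to g2p
#       extension: .lab               # sidecar transcript suffix
#       group_parent_level: 1         # 1 = group by immediate parent directory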


def task_generator_filelist(filelist):
    """Yield one task per speaker from a filelist."""
    grouped_files = defaultdict(list)
    for filename, speaker, languages, text in load_filelist(filelist):
        grouped_files[speaker].append((Path(filename), text, languages))

    logger.info(f"Found {len(grouped_files)} groups in {filelist}")
    for speaker, values in grouped_files.items():
        # `languages` here is whatever the last filelist row carried; per-file
        # languages still travel inside each (file, text, languages) tuple
        yield speaker, values, "filelist", languages, None
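
# load_filelist is expected to yield (filename, speaker, languages, text) rows,
# matching the unpacking above; the on-disk format is defined in
# fish_speech.utils.file.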


def run_task(task):
    name, subset, source, languages, extension = task

    # Parse the files
    sentences = []
    for file in subset:
        if isinstance(file, tuple):
            # Filelist tasks carry the transcript and languages per file
            file, text, languages = file
        else:
            text = None

        np_file = file.with_suffix(".npy")
        if not np_file.exists():
            logger.warning(f"Can't find {np_file}")
            continue

        if text is None:
            # YAML tasks read the transcript from a sidecar file
            txt_file = file.with_suffix(extension)
            if not txt_file.exists():
                logger.warning(f"Can't find {txt_file}")
                continue

            with open(txt_file, "r") as f:
                text = f.read().strip()

        # Simple cleaning: replace { xxx } and < xxx > annotations with spaces,
        # then collapse runs of whitespace
        text = re.sub(r"\{.*?\}", " ", text)
        text = re.sub(r"<.*?>", " ", text)
        text = re.sub(r"\s+", " ", text)

        try:
            phones = [v for _, v in g2p(text, order=languages)]
            semantics = np.load(np_file)
        except Exception as e:
            logger.error(f"Failed to parse {file}: {e}")
            continue

        if isinstance(semantics, np.ndarray):
            semantics = semantics.tolist()

        sentences.append(
            Sentence(
                text=text,
                phones=phones,
                semantics=[Semantics(values=s) for s in semantics],
            )
        )

    # Pack this group's sentences into one serialized TextData record
    return pack_pb_stream(
        TextData(
            source=source,
            name=name,
            languages=languages,
            sentences=sentences,
        )
    )
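
# Note on the .npy layout: each row of the loaded array becomes one
# Semantics(values=...) message, so a 2D array maps to multiple parallel
# token streams (presumably one per codebook of the quantizer).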


@click.command()
@click.option(
    "--config", type=click.Path(), default="fish_speech/configs/data/finetune.yaml"
)
@click.option("--output", type=click.Path(), default="data/quantized-dataset-ft.protos")
@click.option("--filelist", type=click.Path(), default=None)
@click.option("--num-workers", type=int, default=16)
def main(config, output, filelist, num_workers):
    generator_fn = (
        task_generator_yaml(config)
        if filelist is None
        else task_generator_filelist(filelist)
    )

    # Each worker serializes one group; the parent just concatenates the records
    with open(output, "wb") as dataset_fp, Pool(num_workers) as p:
        for result in tqdm(p.imap_unordered(run_task, generator_fn)):
            dataset_fp.write(result)


if __name__ == "__main__":
    main()
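
# Example invocations (paths are illustrative; defaults shown in the options above):
#   python build_dataset.py --config fish_speech/configs/data/finetune.yaml
#   python build_dataset.py --filelist data/filelist.txt --num-workers 8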