build_dataset.py

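"""Build a quantized dataset of (text, phones, semantics) as a protobuf stream.

Tasks come either from a YAML config (--config) or from a filelist
(--filelist). Each task's transcripts are converted to phones with g2p,
paired with the quantized semantic codes in the neighbouring .npy files,
and serialized as packed TextData protos to --output.

Example invocations (a sketch; paths depend on your setup):

    python build_dataset.py \
        --config fish_speech/configs/data/finetune.yaml \
        --output data/quantized-dataset-ft.protos

    python build_dataset.py --filelist data/filelist.txt --num-workers 8
"""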
import re
from collections import defaultdict
from multiprocessing import Pool
from pathlib import Path

import click
import numpy as np
import yaml
from loguru import logger
from tqdm import tqdm

from fish_speech.datasets.protos.text_data_pb2 import Semantics, Sentence, TextData
from fish_speech.datasets.protos.text_data_stream import pack_pb_stream
from fish_speech.text import g2p
from fish_speech.utils.file import load_filelist


def task_generator_yaml(config):
    """Yield (name, files, source, languages, extension) tasks from a YAML config."""
    with open(config, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    for row in config["datasets"]:
        root, source, languages, extension, parent_level = (
            row["root"],
            row["source"],
            row["languages"],
            row["extension"],
            row["group_parent_level"],
        )

        # A single int means "group by that one parent level"
        if isinstance(parent_level, int):
            parent_level = [parent_level]

        # Load the quantized .npy files under the dataset root
        files = list(tqdm(Path(root).rglob("*.npy"), desc=f"Loading {root}"))
        files = sorted(files)

        # Group files by the names of their ancestor directories at the
        # requested levels (1 = immediate parent, 2 = grandparent, ...)
        grouped_files = defaultdict(list)
        for file in tqdm(files, desc=f"Grouping {root}"):
            all_parents = []
            pointer = file
            while pointer.parent.name:
                all_parents.append(pointer.parent.name)
                pointer = pointer.parent

            ps = []
            for level in parent_level:
                ps.append(all_parents[level - 1])
            p = "-".join(ps)
            grouped_files[p].append(file)

        logger.info(f"Found {len(grouped_files)} groups in {root}")
        for name, subset in grouped_files.items():
            yield name, subset, source, languages, extension
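
# An illustrative --config entry consumed above (the keys mirror the row
# lookups in task_generator_yaml; the values are placeholders, not taken
# from this repo):
#
#   datasets:
#     - root: data/demo          # directory scanned for *.npy files
#       source: demo             # free-form source tag stored in TextData
#       languages: [zh, en]      # g2p order for these transcripts
#       extension: .lab          # suffix of the sibling transcript files
#       group_parent_level: 1    # 1 = group by immediate parent directory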


def task_generator_filelist(filelist):
    """Yield one task per speaker from a filelist."""
    grouped_files = defaultdict(list)
    for filename, speaker, languages, text in load_filelist(filelist):
        grouped_files[speaker].append((Path(filename), text, languages))

    logger.info(f"Found {len(grouped_files)} groups in {filelist}")
    for speaker, values in grouped_files.items():
        # `languages` here is whatever the last filelist row carried; run_task
        # overrides it per file from the (filename, text, languages) tuples.
        yield speaker, values, "filelist", languages, None
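
# load_filelist is unpacked above as (filename, speaker, languages, text)
# rows; a plausible on-disk layout (assumed, not defined in this file) is
# one pipe-separated record per line, e.g.:
#
#   data/demo/0001.wav|speaker_a|en|Hello world.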


def run_task(task):
    name, subset, source, languages, extension = task

    # Parse the files
    sentences = []
    for file in subset:
        if isinstance(file, tuple):
            # Filelist tasks carry the text and languages alongside each file
            file, text, languages = file
        else:
            text = None

        np_file = file.with_suffix(".npy")
        if not np_file.exists():
            logger.warning(f"Can't find {np_file}")
            continue

        if text is None:
            # YAML tasks read the transcript from a sibling text file
            txt_file = file.with_suffix(extension)
            if not txt_file.exists():
                logger.warning(f"Can't find {txt_file}")
                continue

            with open(txt_file, "r") as f:
                text = f.read().strip()

        # Simple cleaning: replace { xxx } and < xxx > with a space
        text = re.sub(r"\{.*?\}", " ", text)
        text = re.sub(r"<.*?>", " ", text)
        text = re.sub(r"\s+", " ", text)

        try:
            phones = [v for _, v in g2p(text, order=languages)]
            semantics = np.load(np_file)
        except Exception as e:
            logger.error(f"Failed to parse {file}: {e}")
            continue

        if isinstance(semantics, np.ndarray):
            semantics = semantics.tolist()

        sentences.append(
            Sentence(
                text=text,
                phones=phones,
                semantics=[Semantics(values=s) for s in semantics],
            )
        )

    # Pack the sentences into a serialized TextData stream
    return pack_pb_stream(
        TextData(
            source=source,
            name=name,
            languages=languages,
            sentences=sentences,
        )
    )


@click.command()
@click.option(
    "--config", type=click.Path(), default="fish_speech/configs/data/finetune.yaml"
)
@click.option("--output", type=click.Path(), default="data/quantized-dataset-ft.protos")
@click.option("--filelist", type=click.Path(), default=None)
@click.option("--num-workers", type=int, default=16)
def main(config, output, filelist, num_workers):
    # A filelist takes precedence over the YAML config when given
    generator_fn = (
        task_generator_yaml(config)
        if filelist is None
        else task_generator_filelist(filelist)
    )

    # Process groups in parallel and append each packed stream to the output
    with open(output, "wb") as dataset_fp, Pool(num_workers) as p:
        for result in tqdm(p.imap_unordered(run_task, generator_fn)):
            dataset_fp.write(result)


if __name__ == "__main__":
    main()