# webui.py

import gc
import html
import os
import queue
import threading
from argparse import ArgumentParser
from pathlib import Path

import gradio as gr
import librosa
import torch
from loguru import logger
from transformers import AutoTokenizer

from fish_speech.i18n import i18n
from tools.llama.generate import launch_thread_safe_queue
from tools.vqgan.inference import load_model as load_vqgan_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"

HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).")}

{i18n("Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")
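
# On HuggingFace Spaces the `spaces` package is available and `spaces.GPU`
# schedules the decorated function on a GPU worker; anywhere else the import
# fails, so we fall back to a no-op decorator and the same code runs locally.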
try:
    import spaces

    GPU_DECORATOR = spaces.GPU
except ImportError:

    def GPU_DECORATOR(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper


def build_html_error_message(error):
    return f"""
    <div style="color: red; font-weight: bold;">
        {html.escape(error)}
    </div>
    """


@GPU_DECORATOR
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_k,
    top_p,
    repetition_penalty,
    temperature,
    speaker,
):
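    """Run the full TTS pipeline: optionally encode a reference audio prompt
    with the VQ-GAN encoder, generate semantic tokens with the Llama model,
    then decode those tokens back into a waveform with the VQ-GAN decoder."""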
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        return (
            None,
            build_html_error_message(
                i18n("Text is too long, please keep it under {} characters.").format(
                    args.max_gradio_length
                )
            ),
        )

    # Parse reference audio aka prompt
    prompt_tokens = None
    if enable_reference_audio and reference_audio is not None:
        reference_audio_content, _ = librosa.load(
            reference_audio, sr=vqgan_model.sampling_rate, mono=True
        )
        # Add batch and channel dimensions: (1, 1, num_samples)
        audios = torch.from_numpy(reference_audio_content).to(vqgan_model.device)[
            None, None, :
        ]

        logger.info(
            f"Loaded audio with {audios.shape[2] / vqgan_model.sampling_rate:.2f} seconds"
        )

        # VQ Encoder
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=vqgan_model.device, dtype=torch.long
        )
        prompt_tokens = vqgan_model.encode(audios, audio_lengths)[0][0]

    # LLAMA Inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=vqgan_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_k=int(top_k) if top_k > 0 else None,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=args.max_length,
        speaker=speaker if speaker else None,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )

    payload = dict(
        response_queue=queue.Queue(),
        request=request,
    )
    llama_queue.put(payload)

    # Collect generated semantic tokens from the worker thread. The worker
    # streams token chunks and uses "next" / "done" as sentinel values.
    codes = []
    while True:
        result = payload["response_queue"].get()
        if result == "next":
            # TODO: handle next sentence
            continue

        if result == "done":
            if payload["success"] is False:
                raise payload["response"]
            break

        codes.append(result)

    codes = torch.cat(codes, dim=1)

    # VQGAN Inference
    feature_lengths = torch.tensor([codes.shape[1]], device=vqgan_model.device)
    fake_audios = vqgan_model.decode(
        indices=codes[None], feature_lengths=feature_lengths, return_audios=True
    )[0, 0]
    fake_audios = fake_audios.float().cpu().numpy()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()

    return (vqgan_model.sampling_rate, fake_audios), None
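

# `inference` returns exactly what the Gradio handler below expects: a
# `(sample_rate, samples)` tuple for the gr.Audio(type="numpy") component,
# plus an HTML string (or None) for the gr.HTML error box.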


def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', 'light');window.location.search = params.toString();}}",
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=15
                )

                with gr.Row():
                    with gr.Tab(label=i18n("Advanced Config")):
                        chunk_length = gr.Slider(
                            label=i18n("Iterative Prompt Length, 0 means off"),
                            minimum=0,
                            maximum=500,
                            value=30,
                            step=8,
                        )

                        max_new_tokens = gr.Slider(
                            label=i18n("Maximum tokens per batch, 0 means no limit"),
                            minimum=0,
                            maximum=args.max_length,
                            value=0,  # 0 means no limit
                            step=8,
                        )

                        top_k = gr.Slider(
                            label="Top-K", minimum=0, maximum=100, value=0, step=1
                        )

                        top_p = gr.Slider(
                            label="Top-P", minimum=0, maximum=1, value=0.7, step=0.01
                        )

                        repetition_penalty = gr.Slider(
                            label=i18n("Repetition Penalty"),
                            minimum=0,
                            maximum=2,
                            value=1.5,
                            step=0.01,
                        )

                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0,
                            maximum=2,
                            value=0.7,
                            step=0.01,
                        )

                        speaker = gr.Textbox(
                            label=i18n("Speaker"),
                            placeholder=i18n("Type name of the speaker"),
                            lines=1,
                        )

                    with gr.Tab(label=i18n("Reference Audio")):
                        gr.Markdown(
                            i18n(
                                "5 to 10 seconds of reference audio, useful for specifying speaker."
                            )
                        )

                        enable_reference_audio = gr.Checkbox(
                            label=i18n("Enable Reference Audio"),
                        )
                        reference_audio = gr.Audio(
                            label=i18n("Reference Audio"),
                            type="filepath",
                        )
                        reference_text = gr.Textbox(
                            label=i18n("Reference Text"),
                            placeholder=i18n("Reference Text"),
                            lines=1,
                            value="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                        )

            with gr.Column(scale=3):
                with gr.Row():
                    error = gr.HTML(label=i18n("Error Message"))
                with gr.Row():
                    audio = gr.Audio(label=i18n("Generated Audio"), type="numpy")

                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )

        # Submit
        generate.click(
            inference,
            [
                text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_k,
                top_p,
                repetition_penalty,
                temperature,
                speaker,
            ],
            [audio, error],
            concurrency_limit=1,
        )

    return app


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/text2semantic-sft-large-v1-4k.pth",
    )
    parser.add_argument(
        "--llama-config-name", type=str, default="dual_ar_2_codebook_large"
    )
    parser.add_argument(
        "--vqgan-checkpoint-path",
        type=Path,
        default="checkpoints/vq-gan-group-fsq-2x1024.pth",
    )
    parser.add_argument("--vqgan-config-name", type=str, default="vqgan_pretrain")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--max-length", type=int, default=2048)
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)

    return parser.parse_args()
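

# A typical local launch, assuming the default checkpoint paths above exist
# (adjust the script path to wherever this file lives in your checkout):
#   python webui.py --device cuda --half --compile --max-gradio-length 5000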


if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        config_name=args.llama_config_name,
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        max_length=args.max_length,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    vqgan_model = load_vqgan_model(
        config_name=args.vqgan_config_name,
        checkpoint_path=args.vqgan_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to verify the models load correctly and to avoid first-request latency
    inference(
        text="Hello, world!",
        enable_reference_audio=False,
        reference_audio=None,
        reference_text="",
        max_new_tokens=0,
        chunk_length=0,
        top_k=0,  # 0 means no limit
        top_p=0.7,
        repetition_penalty=1.5,
        temperature=0.7,
        speaker=None,
    )

    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=False)