# webui.py

import gc
import html
import io
import os
import queue
import wave
from argparse import ArgumentParser
from functools import partial
from pathlib import Path

import gradio as gr
import librosa
import numpy as np
import pyrootutils
import torch
from loguru import logger
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.i18n import i18n
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from fish_speech.utils import autocast_exclude_mps, set_seed
from tools.api import decode_vq_tokens, encode_reference
from tools.file import AUDIO_EXTENSIONS, list_files
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"

HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1.4).")}

{i18n("Related code and weights are released under CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")
SPACE_IMPORTED = False


def build_html_error_message(error):
    return f"""
    <div style="color: red; font-weight: bold;">
        {html.escape(str(error))}
    </div>
    """
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    seed="0",
    streaming=False,
):
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        # This function is a generator, so the error must be yielded; a plain
        # `return (...)` here would silently swallow the message.
        yield (
            None,
            None,
            i18n("Text is too long, please keep it under {} characters.").format(
                args.max_gradio_length
            ),
        )
        return

    seed = int(seed)
    if seed != 0:
        set_seed(seed)
        logger.warning(f"set seed: {seed}")

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=reference_audio,
        enable_reference_audio=enable_reference_audio,
    )
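
    # prompt_tokens are the VQ codes of the reference audio; together with the
    # reference transcript they condition generation on the target voice.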

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )
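
    # Each request carries its own response queue: the worker pushes one
    # WrappedGenerateResponse per generated segment, then a "next" action to
    # signal completion.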
    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )
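
    # For streaming playback, emit a WAV header first so the client can start
    # decoding the raw PCM chunks that follow.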
    if streaming:
        yield wav_chunk_header(), None, None

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            yield None, None, build_html_error_message(result.response)
            break

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()
        segments.append(fake_audios)

        if streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None

    if len(segments) == 0:
        # Again, yield rather than return: callers consume this generator.
        yield (
            None,
            None,
            build_html_error_message(
                i18n("No audio generated, please check the input text.")
            ),
        )
        return

    # No matter streaming or not, we need to return the final audio
    audio = np.concatenate(segments, axis=0)
    yield None, (decoder_model.spec_transform.sample_rate, audio), None

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
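

# Streaming variant wired to the "Streaming Generate" button below.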
inference_stream = partial(inference, streaming=True)

n_audios = 4

global_audio_list = []
global_error_list = []
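

# Batch helper for the non-streaming UI: run `inference` batch_infer_num times
# and fan the results out across the fixed pool of n_audios output widgets,
# hiding the unused ones.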
def inference_wrapper(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    seed,
    batch_infer_num,
):
    audios = []
    errors = []

    for _ in range(batch_infer_num):
        result = inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            seed,
        )

        _, audio_data, error_message = next(result)

        audios.append(
            gr.Audio(value=audio_data if audio_data else None, visible=True),
        )
        errors.append(
            gr.HTML(value=error_message if error_message else None, visible=True),
        )

    for _ in range(batch_infer_num, n_audios):
        audios.append(
            gr.Audio(value=None, visible=False),
        )
        errors.append(
            gr.HTML(value=None, visible=False),
        )

    return None, *audios, *errors
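

# Build a RIFF/WAV header for a stream of unknown length. No frames are
# written, so the wave module leaves the size fields at zero on close; most
# players treat that as "read until EOF", which suits chunked streaming.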
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()

    return wav_header_bytes
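
# A minimal usage sketch, assuming the defaults above; a standard PCM WAV
# header is 44 bytes:
#
#     header = wav_chunk_header()
#     assert len(header) == 44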


def normalize_text(user_input, use_normalization):
    if use_normalization:
        return ChnNormedText(raw_text=user_input).normalize()
    else:
        return user_input
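

# Refresh the example-audio dropdown from the local `references/` directory.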
def update_examples():
    examples_dir = Path("references")
    examples_dir.mkdir(parents=True, exist_ok=True)
    example_audios = list_files(examples_dir, AUDIO_EXTENSIONS, recursive=True)
    return gr.Dropdown(choices=example_audios + [""])
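

# Assemble the Gradio Blocks UI and wire the widgets to the handlers above.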
def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', '%s');window.location.search = params.toString();}}"
            % args.theme,
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=10
                )
                refined_text = gr.Textbox(
                    label=i18n("Realtime Transform Text"),
                    placeholder=i18n(
                        "Normalization Result Preview (Currently Only Chinese)"
                    ),
                    lines=5,
                    interactive=False,
                )

                with gr.Row():
                    if_refine_text = gr.Checkbox(
                        label=i18n("Text Normalization"),
                        value=False,
                        scale=1,
                    )

                with gr.Row():
                    with gr.Column():
                        with gr.Tab(label=i18n("Advanced Config")):
                            with gr.Row():
                                chunk_length = gr.Slider(
                                    label=i18n("Iterative Prompt Length, 0 means off"),
                                    minimum=50,
                                    maximum=300,
                                    value=200,
                                    step=8,
                                )

                                max_new_tokens = gr.Slider(
                                    label=i18n(
                                        "Maximum tokens per batch, 0 means no limit"
                                    ),
                                    minimum=0,
                                    maximum=2048,
                                    value=0,  # 0 means no limit
                                    step=8,
                                )

                            with gr.Row():
                                top_p = gr.Slider(
                                    label="Top-P",
                                    minimum=0.6,
                                    maximum=0.9,
                                    value=0.7,
                                    step=0.01,
                                )

                                repetition_penalty = gr.Slider(
                                    label=i18n("Repetition Penalty"),
                                    minimum=1,
                                    maximum=1.5,
                                    value=1.2,
                                    step=0.01,
                                )

                            with gr.Row():
                                temperature = gr.Slider(
                                    label="Temperature",
                                    minimum=0.6,
                                    maximum=0.9,
                                    value=0.7,
                                    step=0.01,
                                )
                                seed = gr.Textbox(
                                    label="Seed",
                                    info="0 means randomized inference, otherwise deterministic",
                                    placeholder="any 32-bit integer",
                                    value="0",
                                )

                        with gr.Tab(label=i18n("Reference Audio")):
                            with gr.Row():
                                gr.Markdown(
                                    i18n(
                                        "5 to 10 seconds of reference audio, useful for specifying speaker."
                                    )
                                )

                            with gr.Row():
                                enable_reference_audio = gr.Checkbox(
                                    label=i18n("Enable Reference Audio"),
                                )

                            with gr.Row():
                                example_audio_dropdown = gr.Dropdown(
                                    label=i18n("Select Example Audio"),
                                    choices=[""],
                                    value="",
                                    interactive=True,
                                    allow_custom_value=True,
                                )

                            with gr.Row():
                                reference_audio = gr.Audio(
                                    label=i18n("Reference Audio"),
                                    type="filepath",
                                )

                            with gr.Row():
                                reference_text = gr.Textbox(
                                    label=i18n("Reference Text"),
                                    lines=1,
                                    placeholder="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                                    value="",
                                )

                        with gr.Tab(label=i18n("Batch Inference")):
                            with gr.Row():
                                batch_infer_num = gr.Slider(
                                    label="Batch infer nums",
                                    minimum=1,
                                    maximum=n_audios,
                                    step=1,
                                    value=1,
                                )

            with gr.Column(scale=3):
                for _ in range(n_audios):
                    with gr.Row():
                        error = gr.HTML(
                            label=i18n("Error Message"),
                            visible=True if _ == 0 else False,
                        )
                        global_error_list.append(error)
                    with gr.Row():
                        audio = gr.Audio(
                            label=i18n("Generated Audio"),
                            type="numpy",
                            interactive=False,
                            visible=True if _ == 0 else False,
                        )
                        global_audio_list.append(audio)

                with gr.Row():
                    stream_audio = gr.Audio(
                        label=i18n("Streaming Audio"),
                        streaming=True,
                        autoplay=True,
                        interactive=False,
                        show_download_button=True,
                    )

                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )
                        generate_stream = gr.Button(
                            value="\U0001F3A7 " + i18n("Streaming Generate"),
                            variant="primary",
                        )

        text.input(
            fn=normalize_text, inputs=[text, if_refine_text], outputs=[refined_text]
        )
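
        # Selecting an example audio also pulls in its transcript from a
        # matching .lab file, if one exists next to the audio.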
        def select_example_audio(audio_path):
            audio_path = Path(audio_path)
            if audio_path.is_file():
                lab_file = Path(audio_path.with_suffix(".lab"))

                if lab_file.exists():
                    lab_content = lab_file.read_text(encoding="utf-8").strip()
                else:
                    lab_content = ""

                return str(audio_path), lab_content, True
            return None, "", False

        # Connect the dropdown to update reference audio and text
        example_audio_dropdown.change(
            fn=update_examples, inputs=[], outputs=[example_audio_dropdown]
        ).then(
            fn=select_example_audio,
            inputs=[example_audio_dropdown],
            outputs=[reference_audio, reference_text, enable_reference_audio],
        )

        # Submit
        generate.click(
            inference_wrapper,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                seed,
                batch_infer_num,
            ],
            [stream_audio, *global_audio_list, *global_error_list],
            concurrency_limit=1,
        )

        generate_stream.click(
            inference_stream,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                seed,
            ],
            [stream_audio, global_audio_list[0], global_error_list[0]],
            concurrency_limit=1,
        )

    return app


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.4",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)
    parser.add_argument("--theme", type=str, default="light")

    return parser.parse_args()
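

# Startup order: load the Llama worker first, then the VQ-GAN decoder, then
# run one dry inference so the first-time latency is paid before serving.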
if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )
    logger.info("Decoder model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            text="Hello, world!",
            enable_reference_audio=False,
            reference_audio=None,
            reference_text="",
            max_new_tokens=0,
            chunk_length=200,
            top_p=0.7,
            repetition_penalty=1.2,
            temperature=0.7,
        )
    )
    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=True)