webui.py

import gc
import html
import io
import os
import queue
import wave
from argparse import ArgumentParser
from functools import partial
from pathlib import Path

import gradio as gr
import librosa
import numpy as np
import pyrootutils
import torch
from loguru import logger
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.i18n import i18n
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from tools.api import decode_vq_tokens, encode_reference
from tools.auto_rerank import batch_asr, calculate_wer, is_chinese, load_model
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"

HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).")}

{i18n("Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")
SPACE_IMPORTED = False
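

# Wrap an error message in red, bold HTML for display in a gr.HTML component.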
def build_html_error_message(error):
    return f"""
    <div style="color: red; font-weight: bold;">
        {html.escape(str(error))}
    </div>
    """
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    streaming=False,
):
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        # This function is a generator, so errors must be yielded rather than
        # returned; a `return value` inside a generator is silently discarded.
        yield (
            None,
            None,
            i18n("Text is too long, please keep it under {} characters.").format(
                args.max_gradio_length
            ),
        )
        return

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=reference_audio,
        enable_reference_audio=enable_reference_audio,
    )

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if streaming:
        yield wav_chunk_header(), None, None

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            yield None, None, build_html_error_message(result.response)
            break

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        # autocast is not available for the MPS backend, so run it on the CPU
        # device type there instead.
        with torch.autocast(
            device_type=(
                "cpu"
                if decoder_model.device.type == "mps"
                else decoder_model.device.type
            ),
            dtype=args.precision,
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()
        segments.append(fake_audios)

        if streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None

    if len(segments) == 0:
        yield (
            None,
            None,
            build_html_error_message(
                i18n("No audio generated, please check the input text.")
            ),
        )
        return

    # No matter streaming or not, we need to return the final audio
    audio = np.concatenate(segments, axis=0)
    yield None, (decoder_model.spec_transform.sample_rate, audio), None

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
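

# Optional ASR-based reranking: synthesize up to two candidates, transcribe
# each with the loaded faster-whisper model, and accept the first one whose
# WER against the input text is <= 0.3 with no huge gap; otherwise fall back
# to the candidate with the best WER seen.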
def inference_with_auto_rerank(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    use_auto_rerank,
    streaming=False,
):
    max_attempts = 2 if use_auto_rerank else 1
    best_wer = float("inf")
    best_audio = None
    best_sample_rate = None

    for attempt in range(max_attempts):
        audio_generator = inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            streaming=False,
        )

        # Drain the generator; the last yielded tuple carries the final audio.
        result = None
        for result in audio_generator:
            pass
        _, audio_tuple, message = result

        # On error the audio slot is None, so check it before unpacking.
        if audio_tuple is None:
            return None, None, message
        sample_rate, audio = audio_tuple

        if not use_auto_rerank:
            return None, (sample_rate, audio), None

        asr_result = batch_asr(asr_model, [audio], sample_rate)[0]
        wer = calculate_wer(text, asr_result["text"])

        if wer <= 0.3 and not asr_result["huge_gap"]:
            return None, (sample_rate, audio), None

        if wer < best_wer:
            best_wer = wer
            best_audio = audio
            best_sample_rate = sample_rate

        if attempt == max_attempts - 1:
            break

    return None, (best_sample_rate, best_audio), None


inference_stream = partial(inference, streaming=True)

n_audios = 4

global_audio_list = []
global_error_list = []
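

# Entry point for the "Generate" button: run batch_infer_num generations and
# pad the remaining slots (up to n_audios) with hidden components so the UI
# always receives a fixed-length list of outputs.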
def inference_wrapper(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    batch_infer_num,
    if_load_asr_model,
):
    audios = []
    errors = []

    for _ in range(batch_infer_num):
        result = inference_with_auto_rerank(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            if_load_asr_model,
        )

        _, audio_data, error_message = result

        audios.append(
            gr.Audio(value=audio_data if audio_data else None, visible=True),
        )
        errors.append(
            gr.HTML(value=error_message if error_message else None, visible=True),
        )

    for _ in range(batch_infer_num, n_audios):
        audios.append(
            gr.Audio(value=None, visible=False),
        )
        errors.append(
            gr.HTML(value=None, visible=False),
        )

    return None, *audios, *errors
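

# Build a standalone RIFF/WAV header so the browser can start playing the raw
# PCM chunks that are streamed after it.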
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    # The wave module only writes the RIFF header when the file is closed, so
    # read the buffer after the `with` block has exited.
    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


def normalize_text(user_input, use_normalization):
    if use_normalization:
        return ChnNormedText(raw_text=user_input).normalize()
    else:
        return user_input
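

# The faster-whisper ASR model is loaded lazily and kept in a module-level
# global so it can be shared by auto-reranking and auto-labeling.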
asr_model = None


def change_if_load_asr_model(if_load):
    global asr_model

    if if_load:
        gr.Warning("Loading faster whisper model...")
        if asr_model is None:
            asr_model = load_model()
        return gr.Checkbox(label="Unload faster whisper model", value=if_load)

    if if_load is False:
        gr.Warning("Unloading faster whisper model...")
        del asr_model
        asr_model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
        return gr.Checkbox(label="Load faster whisper model", value=if_load)
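

# When auto-labeling is requested and the reference text is empty, transcribe
# the reference audio with the ASR model and use the transcript as the text.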
def change_if_auto_label(if_load, if_auto_label, enable_ref, ref_audio, ref_text):
    if if_load and asr_model is not None:
        if (
            if_auto_label
            and enable_ref
            and ref_audio is not None
            and ref_text.strip() == ""
        ):
            data, sample_rate = librosa.load(ref_audio)
            res = batch_asr(asr_model, [data], sample_rate)[0]
            ref_text = res["text"]
    else:
        gr.Warning("Whisper model not loaded!")

    return gr.Textbox(value=ref_text)
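

# Assemble the Gradio Blocks UI: text input and normalization preview, the
# advanced sampling / reference-audio / batch-inference tabs, the output
# slots, and the event wiring for both generate buttons.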
def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', '%s');window.location.search = params.toString();}}"
            % args.theme,
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=10
                )
                refined_text = gr.Textbox(
                    label=i18n("Realtime Transform Text"),
                    placeholder=i18n(
                        "Normalization Result Preview (Currently Only Chinese)"
                    ),
                    lines=5,
                    interactive=False,
                )

                with gr.Row():
                    if_refine_text = gr.Checkbox(
                        label=i18n("Text Normalization"),
                        value=True,
                        scale=1,
                    )
                    if_load_asr_model = gr.Checkbox(
                        label=i18n("Load / Unload ASR model for auto-reranking"),
                        value=False,
                        scale=3,
                    )

                with gr.Row():
                    with gr.Tab(label=i18n("Advanced Config")):
                        chunk_length = gr.Slider(
                            label=i18n("Iterative Prompt Length, 0 means off"),
                            minimum=0,
                            maximum=500,
                            value=100,
                            step=8,
                        )
                        max_new_tokens = gr.Slider(
                            label=i18n("Maximum tokens per batch, 0 means no limit"),
                            minimum=0,
                            maximum=2048,
                            value=1024,  # 0 means no limit
                            step=8,
                        )
                        top_p = gr.Slider(
                            label="Top-P",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )
                        repetition_penalty = gr.Slider(
                            label=i18n("Repetition Penalty"),
                            minimum=1,
                            maximum=1.5,
                            value=1.2,
                            step=0.01,
                        )
                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )

                    with gr.Tab(label=i18n("Reference Audio")):
                        gr.Markdown(
                            i18n(
                                "5 to 10 seconds of reference audio, useful for specifying speaker."
                            )
                        )
                        enable_reference_audio = gr.Checkbox(
                            label=i18n("Enable Reference Audio"),
                        )
                        reference_audio = gr.Audio(
                            label=i18n("Reference Audio"),
                            type="filepath",
                        )
                        with gr.Row():
                            if_auto_label = gr.Checkbox(
                                label=i18n("Auto Labeling"),
                                min_width=100,
                                scale=0,
                                value=False,
                            )
                            reference_text = gr.Textbox(
                                label=i18n("Reference Text"),
                                lines=1,
                                placeholder="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                                value="",
                            )

                    with gr.Tab(label=i18n("Batch Inference")):
                        batch_infer_num = gr.Slider(
                            label="Batch infer nums",
                            minimum=1,
                            maximum=n_audios,
                            step=1,
                            value=1,
                        )

            with gr.Column(scale=3):
                for _ in range(n_audios):
                    with gr.Row():
                        error = gr.HTML(
                            label=i18n("Error Message"),
                            visible=True if _ == 0 else False,
                        )
                        global_error_list.append(error)
                    with gr.Row():
                        audio = gr.Audio(
                            label=i18n("Generated Audio"),
                            type="numpy",
                            interactive=False,
                            visible=True if _ == 0 else False,
                        )
                        global_audio_list.append(audio)

                with gr.Row():
                    stream_audio = gr.Audio(
                        label=i18n("Streaming Audio"),
                        streaming=True,
                        autoplay=True,
                        interactive=False,
                        show_download_button=True,
                    )
                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )
                        generate_stream = gr.Button(
                            value="\U0001F3A7 " + i18n("Streaming Generate"),
                            variant="primary",
                        )

        text.input(
            fn=normalize_text, inputs=[text, if_refine_text], outputs=[refined_text]
        )

        if_load_asr_model.change(
            fn=change_if_load_asr_model,
            inputs=[if_load_asr_model],
            outputs=[if_load_asr_model],
        )

        if_auto_label.change(
            fn=lambda: gr.Textbox(value=""),
            inputs=[],
            outputs=[reference_text],
        ).then(
            fn=change_if_auto_label,
            inputs=[
                if_load_asr_model,
                if_auto_label,
                enable_reference_audio,
                reference_audio,
                reference_text,
            ],
            outputs=[reference_text],
        )

        # Submit
        generate.click(
            inference_wrapper,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                batch_infer_num,
                if_load_asr_model,
            ],
            [stream_audio, *global_audio_list, *global_error_list],
            concurrency_limit=1,
        )

        generate_stream.click(
            inference_stream,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
            ],
            [stream_audio, global_audio_list[0], global_error_list[0]],
            concurrency_limit=10,
        )

    return app


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.2-sft",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)
    parser.add_argument("--theme", type=str, default="light")

    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("Decoder model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            text="Hello, world!",
            enable_reference_audio=False,
            reference_audio=None,
            reference_text="",
            max_new_tokens=0,
            chunk_length=100,
            top_p=0.7,
            repetition_penalty=1.2,
            temperature=0.7,
        )
    )

    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=True)