# webui.py
import gc
import html
import io
import os
import queue
import wave
from argparse import ArgumentParser
from functools import partial
from pathlib import Path

import gradio as gr
import librosa
import numpy as np
import pyrootutils
import torch
from loguru import logger
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.i18n import i18n
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from fish_speech.utils import autocast_exclude_mps
from tools.api import decode_vq_tokens, encode_reference
from tools.auto_rerank import batch_asr, calculate_wer, is_chinese, load_model
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"


HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).")}

{i18n("Related code and weights are released under CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")

SPACE_IMPORTED = False


def build_html_error_message(error):
    return f"""
    <div style="color: red; font-weight: bold;">
        {html.escape(str(error))}
    </div>
    """
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    streaming=False,
):
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        # This function is a generator, so a bare `return <value>` would be
        # swallowed by StopIteration; yield the error tuple instead, then stop.
        yield (
            None,
            None,
            i18n("Text is too long, please keep it under {} characters.").format(
                args.max_gradio_length
            ),
        )
        return

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=reference_audio,
        enable_reference_audio=enable_reference_audio,
    )

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if streaming:
        yield wav_chunk_header(), None, None

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            yield None, None, build_html_error_message(result.response)
            break

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()
        segments.append(fake_audios)

        if streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None

    if len(segments) == 0:
        # Same generator caveat as above: yield the error so the caller sees it.
        yield (
            None,
            None,
            build_html_error_message(
                i18n("No audio generated, please check the input text.")
            ),
        )
        return

    # No matter streaming or not, we need to return the final audio
    audio = np.concatenate(segments, axis=0)
    yield None, (decoder_model.spec_transform.sample_rate, audio), None

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
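

# Auto-rerank: run inference up to twice, transcribe each candidate with the
# faster-whisper ASR model, and accept a take once its WER against the input
# text is <= 0.3 with no huge alignment gap; otherwise keep the lowest-WER take.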
def inference_with_auto_rerank(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    use_auto_rerank,
    streaming=False,
):
    max_attempts = 2 if use_auto_rerank else 1
    best_wer = float("inf")
    best_audio = None
    best_sample_rate = None

    for attempt in range(max_attempts):
        audio_generator = inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            streaming=False,
        )

        # Drain the generator; the final yield carries the full audio.
        result = None
        for result in audio_generator:
            pass
        _, audio_tuple, message = result

        # Check for None before unpacking, so the error path does not raise.
        if audio_tuple is None:
            return None, None, message
        sample_rate, audio = audio_tuple

        if not use_auto_rerank:
            return None, (sample_rate, audio), None

        asr_result = batch_asr(asr_model, [audio], sample_rate)[0]
        wer = calculate_wer(text, asr_result["text"])

        if wer <= 0.3 and not asr_result["huge_gap"]:
            return None, (sample_rate, audio), None

        if wer < best_wer:
            best_wer = wer
            best_audio = audio
            best_sample_rate = sample_rate

        if attempt == max_attempts - 1:
            break

    return None, (best_sample_rate, best_audio), None
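

# Streaming variant of `inference`, plus the fixed number of batch-output slots
# whose Gradio components are collected into module-level lists by `build_app`.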
inference_stream = partial(inference, streaming=True)

n_audios = 4

global_audio_list = []
global_error_list = []


def inference_wrapper(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    batch_infer_num,
    if_load_asr_model,
):
    audios = []
    errors = []

    for _ in range(batch_infer_num):
        result = inference_with_auto_rerank(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            if_load_asr_model,
        )

        _, audio_data, error_message = result

        audios.append(
            gr.Audio(value=audio_data if audio_data else None, visible=True),
        )
        errors.append(
            gr.HTML(value=error_message if error_message else None, visible=True),
        )

    for _ in range(batch_infer_num, n_audios):
        audios.append(
            gr.Audio(value=None, visible=False),
        )
        errors.append(
            gr.HTML(value=None, visible=False),
        )

    return None, *audios, *errors
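

# A valid WAV header with zero data frames: yielded first in streaming mode so
# the raw int16 PCM chunks that follow are interpreted as one continuous WAV
# stream (conceptually, wav_chunk_header() + b"".join(pcm_chunks)).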
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()

    return wav_header_bytes


def normalize_text(user_input, use_normalization):
    if use_normalization:
        return ChnNormedText(raw_text=user_input).normalize()
    else:
        return user_input
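

# The faster-whisper ASR model (used for auto-reranking and auto-labeling) is
# loaded lazily and unloaded on demand, so its GPU memory is only held while
# the corresponding checkbox is enabled.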
asr_model = None


def change_if_load_asr_model(if_load):
    global asr_model

    if if_load:
        gr.Warning("Loading faster whisper model...")
        if asr_model is None:
            asr_model = load_model()
        return gr.Checkbox(label="Unload faster whisper model", value=if_load)

    if if_load is False:
        gr.Warning("Unloading faster whisper model...")
        del asr_model
        asr_model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            gc.collect()
        return gr.Checkbox(label="Load faster whisper model", value=if_load)


def change_if_auto_label(if_load, if_auto_label, enable_ref, ref_audio, ref_text):
    if if_load and asr_model is not None:
        if (
            if_auto_label
            and enable_ref
            and ref_audio is not None
            and ref_text.strip() == ""
        ):
            data, sample_rate = librosa.load(ref_audio)
            res = batch_asr(asr_model, [data], sample_rate)[0]
            ref_text = res["text"]
    else:
        gr.Warning("Whisper model not loaded!")

    return gr.Textbox(value=ref_text)
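

# UI layout: text input and config tabs (advanced sampling, reference audio,
# batch inference) on the left; up to `n_audios` generated-audio slots plus a
# streaming player on the right, with separate batch and streaming buttons.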
def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', '%s');window.location.search = params.toString();}}"
            % args.theme,
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=10
                )
                refined_text = gr.Textbox(
                    label=i18n("Realtime Transform Text"),
                    placeholder=i18n(
                        "Normalization Result Preview (Currently Only Chinese)"
                    ),
                    lines=5,
                    interactive=False,
                )

                with gr.Row():
                    if_refine_text = gr.Checkbox(
                        label=i18n("Text Normalization"),
                        value=True,
                        scale=1,
                    )
                    if_load_asr_model = gr.Checkbox(
                        label=i18n("Load / Unload ASR model for auto-reranking"),
                        value=False,
                        scale=3,
                    )

                with gr.Row():
                    with gr.Tab(label=i18n("Advanced Config")):
                        chunk_length = gr.Slider(
                            label=i18n("Iterative Prompt Length, 0 means off"),
                            minimum=0,
                            maximum=500,
                            value=100,
                            step=8,
                        )

                        max_new_tokens = gr.Slider(
                            label=i18n("Maximum tokens per batch, 0 means no limit"),
                            minimum=0,
                            maximum=2048,
                            value=1024,  # 0 means no limit
                            step=8,
                        )

                        top_p = gr.Slider(
                            label="Top-P",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )

                        repetition_penalty = gr.Slider(
                            label=i18n("Repetition Penalty"),
                            minimum=1,
                            maximum=1.5,
                            value=1.2,
                            step=0.01,
                        )

                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )

                    with gr.Tab(label=i18n("Reference Audio")):
                        gr.Markdown(
                            i18n(
                                "5 to 10 seconds of reference audio, useful for specifying speaker."
                            )
                        )

                        enable_reference_audio = gr.Checkbox(
                            label=i18n("Enable Reference Audio"),
                        )
                        reference_audio = gr.Audio(
                            label=i18n("Reference Audio"),
                            type="filepath",
                        )
                        with gr.Row():
                            if_auto_label = gr.Checkbox(
                                label=i18n("Auto Labeling"),
                                min_width=100,
                                scale=0,
                                value=False,
                            )
                            reference_text = gr.Textbox(
                                label=i18n("Reference Text"),
                                lines=1,
                                placeholder="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                                value="",
                            )

                    with gr.Tab(label=i18n("Batch Inference")):
                        batch_infer_num = gr.Slider(
                            label="Batch infer nums",
                            minimum=1,
                            maximum=n_audios,
                            step=1,
                            value=1,
                        )

            with gr.Column(scale=3):
                for _ in range(n_audios):
                    with gr.Row():
                        error = gr.HTML(
                            label=i18n("Error Message"),
                            visible=True if _ == 0 else False,
                        )
                        global_error_list.append(error)
                    with gr.Row():
                        audio = gr.Audio(
                            label=i18n("Generated Audio"),
                            type="numpy",
                            interactive=False,
                            visible=True if _ == 0 else False,
                        )
                        global_audio_list.append(audio)

                with gr.Row():
                    stream_audio = gr.Audio(
                        label=i18n("Streaming Audio"),
                        streaming=True,
                        autoplay=True,
                        interactive=False,
                        show_download_button=True,
                    )

                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )
                        generate_stream = gr.Button(
                            value="\U0001F3A7 " + i18n("Streaming Generate"),
                            variant="primary",
                        )

        text.input(
            fn=normalize_text, inputs=[text, if_refine_text], outputs=[refined_text]
        )

        if_load_asr_model.change(
            fn=change_if_load_asr_model,
            inputs=[if_load_asr_model],
            outputs=[if_load_asr_model],
        )

        if_auto_label.change(
            fn=lambda: gr.Textbox(value=""),
            inputs=[],
            outputs=[reference_text],
        ).then(
            fn=change_if_auto_label,
            inputs=[
                if_load_asr_model,
                if_auto_label,
                enable_reference_audio,
                reference_audio,
                reference_text,
            ],
            outputs=[reference_text],
        )

        # Submit
        generate.click(
            inference_wrapper,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                batch_infer_num,
                if_load_asr_model,
            ],
            [stream_audio, *global_audio_list, *global_error_list],
            concurrency_limit=1,
        )

        generate_stream.click(
            inference_stream,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
            ],
            [stream_audio, global_audio_list[0], global_error_list[0]],
            concurrency_limit=10,
        )

    return app
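

# CLI flags; the checkpoint defaults match the fish-speech-1.4 release layout,
# and --half switches decoding precision from bfloat16 to float16.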
def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.4",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)
    parser.add_argument("--theme", type=str, default="light")

    return parser.parse_args()
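

# Startup sequence: bring up the Llama backbone behind a thread-safe request
# queue, load the VQ-GAN decoder, then run one throwaway generation so kernel
# (and optional torch.compile) warm-up cost is paid before the first request.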
if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("Decoder model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            text="Hello, world!",
            enable_reference_audio=False,
            reference_audio=None,
            reference_text="",
            max_new_tokens=0,
            chunk_length=100,
            top_p=0.7,
            repetition_penalty=1.2,
            temperature=0.7,
        )
    )

    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=True)