# webui.py

import gc
import html
import io
import os
import queue
import wave
from argparse import ArgumentParser
from functools import partial
from pathlib import Path

import gradio as gr
import numpy as np
import pyrootutils
import torch
from loguru import logger
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.i18n import i18n
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from tools.api import decode_vq_tokens, encode_reference
from tools.auto_rerank import batch_asr, calculate_wer, is_chinese, load_model
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"

HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).")}

{i18n("Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")
SPACE_IMPORTED = False


def build_html_error_message(error):
    return f"""
    <div style="color: red;
    font-weight: bold;">
        {html.escape(str(error))}
    </div>
    """
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    streaming=False,
):
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        # This function is a generator, so a bare `return value` would be
        # swallowed by StopIteration; yield the error tuple instead.
        yield (
            None,
            None,
            i18n("Text is too long, please keep it under {} characters.").format(
                args.max_gradio_length
            ),
        )
        return

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=reference_audio,
        enable_reference_audio=enable_reference_audio,
    )

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if streaming:
        yield wav_chunk_header(), None, None

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            yield None, None, build_html_error_message(result.response)
            break

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        # Autocast does not support the MPS backend, so fall back to CPU
        # autocast there
        with torch.autocast(
            device_type=(
                "cpu"
                if decoder_model.device.type == "mps"
                else decoder_model.device.type
            ),
            dtype=args.precision,
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()
        segments.append(fake_audios)

        if streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None

    if len(segments) == 0:
        yield (
            None,
            None,
            build_html_error_message(
                i18n("No audio generated, please check the input text.")
            ),
        )
        return

    # No matter streaming or not, we need to return the final audio
    audio = np.concatenate(segments, axis=0)
    yield None, (decoder_model.spec_transform.sample_rate, audio), None

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
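

# Auto-rerank: transcribe the generated audio with an ASR model (chosen by
# `is_chinese`), compute the word error rate against the input text, and
# retry generation when the WER is high or the ASR reports a huge gap.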
def inference_with_auto_rerank(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    streaming=False,
    use_auto_rerank=True,
):
    if not use_auto_rerank:
        return inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            streaming,
        )

    zh_model, en_model = load_model()
    max_attempts = 2
    best_wer = float("inf")
    best_audio = None
    best_sample_rate = None

    for attempt in range(max_attempts):
        audio_generator = inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            streaming=False,
        )

        # Drain the generator; only the final yielded tuple matters here
        result = None
        for result in audio_generator:
            pass
        _, audio_tuple, message = result

        if audio_tuple is None:
            return None, None, message
        sample_rate, audio = audio_tuple

        asr_result = batch_asr(
            zh_model if is_chinese(text) else en_model, [audio], sample_rate
        )[0]
        wer = calculate_wer(text, asr_result["text"])

        # Accept this attempt if the transcript is close enough and the ASR
        # did not flag a huge gap between audio and text
        if wer <= 0.3 and not asr_result["huge_gap"]:
            return None, (sample_rate, audio), None

        if wer < best_wer:
            best_wer = wer
            best_audio = audio
            best_sample_rate = sample_rate

    # All attempts failed the check; fall back to the best one seen
    return None, (best_sample_rate, best_audio), None


inference_stream = partial(inference_with_auto_rerank, streaming=True)

n_audios = 4

global_audio_list = []
global_error_list = []
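

# Runs up to `batch_infer_num` independent generations for the non-streaming
# "Generate" button, then pads the remaining `n_audios` output slots with
# hidden components so the length of Gradio's output list stays fixed.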
def inference_wrapper(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    batch_infer_num,
):
    audios = []
    errors = []

    for _ in range(batch_infer_num):
        result = inference_with_auto_rerank(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
        )

        _, audio_data, error_message = result

        audios.append(
            gr.Audio(value=audio_data if audio_data else None, visible=True),
        )
        errors.append(
            gr.HTML(value=error_message if error_message else None, visible=True),
        )

    for _ in range(batch_infer_num, n_audios):
        audios.append(
            gr.Audio(value=None, visible=False),
        )
        errors.append(
            gr.HTML(value=None, visible=False),
        )

    return None, *audios, *errors
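

# Writing zero frames through the `wave` module produces just the RIFF/WAVE
# header (no audio data), which primes the streaming audio component before
# the raw PCM chunks follow.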
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


def normalize_text(user_input, use_normalization):
    if use_normalization:
        return ChnNormedText(raw_text=user_input).normalize()
    return user_input
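

# Gradio layout: the left column holds the input text plus config tabs
# (advanced sampling settings, reference audio, batch size); the right
# column holds `n_audios` result slots, the streaming player, and the
# two generate buttons.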
def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', 'light');window.location.search = params.toString();}}",
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=10
                )
                refined_text = gr.Textbox(
                    label=i18n("Realtime Transform Text"),
                    placeholder=i18n(
                        "Normalization Result Preview (Currently Only Chinese)"
                    ),
                    lines=5,
                    interactive=False,
                )

                with gr.Row():
                    if_refine_text = gr.Checkbox(
                        label=i18n("Text Normalization"),
                        value=True,
                        scale=0,
                        min_width=150,
                    )

                with gr.Row():
                    with gr.Tab(label=i18n("Advanced Config")):
                        chunk_length = gr.Slider(
                            label=i18n("Iterative Prompt Length, 0 means off"),
                            minimum=0,
                            maximum=500,
                            value=100,
                            step=8,
                        )

                        max_new_tokens = gr.Slider(
                            label=i18n("Maximum tokens per batch, 0 means no limit"),
                            minimum=0,
                            maximum=2048,
                            value=1024,  # 0 means no limit
                            step=8,
                        )

                        top_p = gr.Slider(
                            label="Top-P",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )

                        repetition_penalty = gr.Slider(
                            label=i18n("Repetition Penalty"),
                            minimum=1,
                            maximum=1.5,
                            value=1.2,
                            step=0.01,
                        )

                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0.6,
                            maximum=0.9,
                            value=0.7,
                            step=0.01,
                        )

                    with gr.Tab(label=i18n("Reference Audio")):
                        gr.Markdown(
                            i18n(
                                "5 to 10 seconds of reference audio, useful for specifying speaker."
                            )
                        )
                        enable_reference_audio = gr.Checkbox(
                            label=i18n("Enable Reference Audio"),
                        )
                        reference_audio = gr.Audio(
                            label=i18n("Reference Audio"),
                            type="filepath",
                        )
                        reference_text = gr.Textbox(
                            label=i18n("Reference Text"),
                            placeholder=i18n("Reference Text"),
                            lines=1,
                            value="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                        )

                    with gr.Tab(label=i18n("Batch Inference")):
                        batch_infer_num = gr.Slider(
                            label="Batch infer nums",
                            minimum=1,
                            maximum=n_audios,
                            step=1,
                            value=1,
                        )

            with gr.Column(scale=3):
                for _ in range(n_audios):
                    with gr.Row():
                        error = gr.HTML(
                            label=i18n("Error Message"),
                            visible=True if _ == 0 else False,
                        )
                        global_error_list.append(error)
                    with gr.Row():
                        audio = gr.Audio(
                            label=i18n("Generated Audio"),
                            type="numpy",
                            interactive=False,
                            visible=True if _ == 0 else False,
                        )
                        global_audio_list.append(audio)

                with gr.Row():
                    stream_audio = gr.Audio(
                        label=i18n("Streaming Audio"),
                        streaming=True,
                        autoplay=True,
                        interactive=False,
                        show_download_button=True,
                    )

                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )
                        generate_stream = gr.Button(
                            value="\U0001F3A7 " + i18n("Streaming Generate"),
                            variant="primary",
                        )

        text.input(
            fn=normalize_text, inputs=[text, if_refine_text], outputs=[refined_text]
        )

        # Submit
        generate.click(
            inference_wrapper,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                batch_infer_num,
            ],
            [stream_audio, *global_audio_list, *global_error_list],
            concurrency_limit=1,
        )

        generate_stream.click(
            inference_stream,
            [
                refined_text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
            ],
            [stream_audio, global_audio_list[0], global_error_list[0]],
            concurrency_limit=10,
        )

    return app


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.2-sft",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=Path,
        default="checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)

    return parser.parse_args()
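

# `args`, `llama_queue`, and `decoder_model` are deliberately created at
# module scope here so that `inference` can reference them as globals; the
# dry run below verifies the checkpoints load and pays the first-call
# (and, with --compile, compilation) latency up front.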
if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("Decoder model loaded, warming up...")

    # Dry run to check that the model loaded correctly and to avoid
    # first-time inference latency
    list(
        inference(
            text="Hello, world!",
            enable_reference_audio=False,
            reference_audio=None,
            reference_text="",
            max_new_tokens=0,
            chunk_length=100,
            top_p=0.7,
            repetition_penalty=1.2,
            temperature=0.7,
        )
    )

    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=True)