webui.py

import gc
import html
import io
import os
import queue
import wave
from argparse import ArgumentParser
from functools import partial
from pathlib import Path

import gradio as gr
import numpy as np
import pyrootutils
import torch
from loguru import logger
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
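
# Imports from the repository itself must come after setup_root, which puts
# the project root on sys.path.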
from fish_speech.i18n import i18n
from tools.api import decode_vq_tokens, encode_reference
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model

# Make einx happy
os.environ["EINX_FILTER_TRACEBACK"] = "false"

HEADER_MD = f"""# Fish Speech

{i18n("A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).")}

{i18n("You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).")}

{i18n("Related code is released under the BSD-3-Clause License, and weights are released under the CC BY-NC-SA 4.0 License.")}

{i18n("We are not responsible for any misuse of the model; please consider your local laws and regulations before using it.")}
"""

TEXTBOX_PLACEHOLDER = i18n("Put your text here.")

SPACE_IMPORTED = False
cached_audio = np.zeros((1,))


def build_html_error_message(error):
    return f"""
    <div style="color: red; font-weight: bold;">
        {html.escape(str(error))}
    </div>
    """
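

# `inference` is a generator: each step yields a
# (streaming_pcm_bytes, (sample_rate, audio_ndarray), error_html) triple, with
# unused slots set to None. Non-streaming callers keep only the final yield.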
@torch.inference_mode()
def inference(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    speaker,
    streaming=False,
):
    if args.max_gradio_length > 0 and len(text) > args.max_gradio_length:
        # Inside a generator, errors must be yielded rather than returned;
        # a plain `return value` is swallowed by StopIteration.
        yield (
            None,
            None,
            i18n("Text is too long, please keep it under {} characters.").format(
                args.max_gradio_length
            ),
        )
        return

    # Parse reference audio, a.k.a. the prompt
    prompt_tokens, reference_embedding = encode_reference(
        decoder_model=decoder_model,
        reference_audio=reference_audio,
        enable_reference_audio=enable_reference_audio,
    )

    # Llama inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=decoder_model.device,
        max_new_tokens=max_new_tokens,
        text=text,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=args.compile,
        iterative_prompt=chunk_length > 0,
        chunk_length=chunk_length,
        max_length=args.max_length,
        speaker=speaker if speaker else None,
        prompt_tokens=prompt_tokens if enable_reference_audio else None,
        prompt_text=reference_text if enable_reference_audio else None,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )
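
    # The Llama worker consumes GenerateRequests from llama_queue and streams
    # WrappedGenerateResponse items back on response_queue until it signals
    # completion with action == "next".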
    if streaming:
        yield wav_chunk_header(), None, None

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            yield None, None, build_html_error_message(result.response)
            break

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        text_tokens = llama_tokenizer.encode(result.text, return_tensors="pt").to(
            decoder_model.device
        )

        with torch.autocast(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
                text_tokens=text_tokens,
                reference_embedding=reference_embedding,
            )

        fake_audios = fake_audios.float().cpu().numpy()
        segments.append(fake_audios)

        if streaming:
            # Scale float audio in [-1, 1] to 16-bit PCM for the WAV stream.
            yield (fake_audios * 32768).astype(np.int16).tobytes(), None, None

    if len(segments) == 0:
        yield (
            None,
            None,
            build_html_error_message(
                i18n("No audio generated, please check the input text.")
            ),
        )
        return

    # Streaming or not, we always emit the final concatenated audio.
    audio = np.concatenate(segments, axis=0)
    yield None, (decoder_model.sampling_rate, audio), None

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
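

# Streaming mode reuses `inference` unchanged; only the `streaming` flag differs.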
inference_stream = partial(inference, streaming=True)
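
# The UI pre-builds n_audios output slots; batch inference fills the first
# batch_infer_num of them and hides the rest.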
n_audios = 4

global_audio_list = []
global_error_list = []


def inference_wrapper(
    text,
    enable_reference_audio,
    reference_audio,
    reference_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
    speaker,
    batch_infer_num,
):
    audios = []
    errors = []

    for _ in range(batch_infer_num):
        items = inference(
            text,
            enable_reference_audio,
            reference_audio,
            reference_text,
            max_new_tokens,
            chunk_length,
            top_p,
            repetition_penalty,
            temperature,
            speaker,
        )

        # Guard against an exhausted generator so `item` is always defined.
        item = None
        try:
            item = next(items)
        except StopIteration:
            logger.warning("No more audio data available.")

        audios.append(
            gr.Audio(value=item[1] if (item and item[1]) else None, visible=True),
        )
        errors.append(
            gr.HTML(value=item[2] if (item and item[2]) else None, visible=True),
        )

    for _ in range(batch_infer_num, n_audios):
        audios.append(
            gr.Audio(value=None, visible=False),
        )
        errors.append(
            gr.HTML(value=None, visible=False),
        )

    return None, *audios, *errors
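

# The streaming path first yields a data-less RIFF/WAV header, then appends raw
# 16-bit PCM chunks as the decoder produces them. Note the 44100 Hz default: if
# the decoder's sampling_rate differs, it should be passed in explicitly.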
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()

    return wav_header_bytes


def build_app():
    with gr.Blocks(theme=gr.themes.Base()) as app:
        gr.Markdown(HEADER_MD)

        # Use light theme by default
        app.load(
            None,
            None,
            js="() => {const params = new URLSearchParams(window.location.search);if (!params.has('__theme')) {params.set('__theme', 'light');window.location.search = params.toString();}}",
        )

        # Inference
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label=i18n("Input Text"), placeholder=TEXTBOX_PLACEHOLDER, lines=15
                )

                with gr.Row():
                    with gr.Tab(label=i18n("Advanced Config")):
                        chunk_length = gr.Slider(
                            label=i18n("Iterative Prompt Length, 0 means off"),
                            minimum=0,
                            maximum=500,
                            value=150,
                            step=8,
                        )

                        max_new_tokens = gr.Slider(
                            label=i18n("Maximum tokens per batch, 0 means no limit"),
                            minimum=0,
                            maximum=args.max_length,
                            value=0,  # 0 means no limit
                            step=8,
                        )

                        top_p = gr.Slider(
                            label="Top-P", minimum=0, maximum=1, value=0.7, step=0.01
                        )

                        repetition_penalty = gr.Slider(
                            label=i18n("Repetition Penalty"),
                            minimum=0,
                            maximum=2,
                            value=1.5,
                            step=0.01,
                        )

                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0,
                            maximum=2,
                            value=0.7,
                            step=0.01,
                        )

                        speaker = gr.Textbox(
                            label=i18n("Speaker"),
                            placeholder=i18n("Type name of the speaker"),
                            lines=1,
                        )

                    with gr.Tab(label=i18n("Reference Audio")):
                        gr.Markdown(
                            i18n(
                                "5 to 10 seconds of reference audio, useful for specifying the speaker."
                            )
                        )

                        enable_reference_audio = gr.Checkbox(
                            label=i18n("Enable Reference Audio"),
                        )
                        reference_audio = gr.Audio(
                            label=i18n("Reference Audio"),
                            type="filepath",
                        )
                        reference_text = gr.Textbox(
                            label=i18n("Reference Text"),
                            placeholder=i18n("Reference Text"),
                            lines=1,
                            # "In complete ignorance, a day in the dream has
                            # ended, and a new 'cycle' will begin."
                            value="在一无所知中,梦里的一天结束了,一个新的「轮回」便会开始。",
                        )
                    with gr.Tab(label=i18n("Batch Inference")):
                        batch_infer_num = gr.Slider(
                            label="Batch inference count",
                            minimum=1,
                            maximum=n_audios,
                            step=1,
                            value=1,
                        )

            with gr.Column(scale=3):
                for _ in range(n_audios):
                    with gr.Row():
                        error = gr.HTML(
                            label=i18n("Error Message"),
                            visible=True if _ == 0 else False,
                        )
                        global_error_list.append(error)
                    with gr.Row():
                        audio = gr.Audio(
                            label=i18n("Generated Audio"),
                            type="numpy",
                            interactive=False,
                            visible=True if _ == 0 else False,
                        )
                        global_audio_list.append(audio)

                with gr.Row():
                    stream_audio = gr.Audio(
                        label=i18n("Streaming Audio"),
                        streaming=True,
                        autoplay=True,
                        interactive=False,
                    )
                with gr.Row():
                    with gr.Column(scale=3):
                        generate = gr.Button(
                            value="\U0001F3A7 " + i18n("Generate"), variant="primary"
                        )
                        generate_stream = gr.Button(
                            value="\U0001F3A7 " + i18n("Streaming Generate"),
                            variant="primary",
                        )
        # Submit
        generate.click(
            inference_wrapper,
            [
                text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                speaker,
                batch_infer_num,
            ],
            [stream_audio, *global_audio_list, *global_error_list],
            concurrency_limit=1,
        )

        generate_stream.click(
            inference_stream,
            [
                text,
                enable_reference_audio,
                reference_audio,
                reference_text,
                max_new_tokens,
                chunk_length,
                top_p,
                repetition_penalty,
                temperature,
                speaker,
            ],
            [stream_audio, global_audio_list[0], global_error_list[0]],
            concurrency_limit=10,
        )

    return app


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=Path,
        default="checkpoints/text2semantic-sft-medium-v1-4k.pth",
    )
    parser.add_argument(
        "--llama-config-name", type=str, default="dual_ar_2_codebook_medium"
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=Path,
        default="checkpoints/vq-gan-group-fsq-2x1024.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="vqgan_pretrain")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--max-length", type=int, default=2048)
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)

    return parser.parse_args()
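

# A typical invocation using the flags defined above (the script path may
# differ in your checkout):
#   python webui.py --device cuda --half --compile --max-gradio-length 2000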


if __name__ == "__main__":
    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        config_name=args.llama_config_name,
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        max_length=args.max_length,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("Decoder model loaded, warming up...")

    # Dry run: verify the models load correctly and absorb the first-call
    # latency; list() drains the generator to completion.
    list(
        inference(
            text="Hello, world!",
            enable_reference_audio=False,
            reference_audio=None,
            reference_text="",
            max_new_tokens=0,
            chunk_length=150,
            top_p=0.7,
            repetition_penalty=1.5,
            temperature=0.7,
            speaker=None,
        )
    )

    logger.info("Warming up done, launching the web UI...")

    app = build_app()
    app.launch(show_api=True)