# api.py

import io
import os
import queue
import sys
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Any

import numpy as np
import ormsgpack
import pyrootutils
import soundfile as sf
import torch
import torchaudio
from baize.datastructures import ContentType
from kui.asgi import (
    Body,
    FactoryClass,
    HTTPException,
    HttpRequest,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from fish_speech.utils import autocast_exclude_mps
from tools.commons import ServeTTSRequest
from tools.file import AUDIO_EXTENSIONS, audio_to_bytes, list_files, read_ref_text
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    # Writing zero frames still makes `wave` emit a complete RIFF/WAV header
    # on close; the streaming path yields this header before the PCM chunks.
    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


# Define utils for web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


def load_audio(reference_audio, sr):
    # Heuristic: anything too long to be a filesystem path, or pointing at a
    # file that does not exist, is treated as raw in-memory audio bytes.
    if len(reference_audio) > 255 or not Path(reference_audio).exists():
        audio_data = reference_audio
        reference_audio = io.BytesIO(audio_data)

    waveform, original_sr = torchaudio.load(
        reference_audio,
        backend="soundfile",  # not every linux release supports 'sox' or 'ffmpeg'
    )

    if waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    if original_sr != sr:
        resampler = torchaudio.transforms.Resample(orig_freq=original_sr, new_freq=sr)
        waveform = resampler(waveform)

    audio = waveform.squeeze().numpy()
    return audio


def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content = load_audio(
            reference_audio, decoder_model.spec_transform.sample_rate
        )

        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]
        else:
            raise ValueError(f"Unknown model type: {type(decoder_model)}")

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        logger.info("No reference audio provided")

    return prompt_tokens


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        )[0].squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


@torch.inference_mode()
def inference(req: ServeTTSRequest):
    global prompt_tokens, prompt_texts

    idstr: str | None = req.reference_id
    if idstr is not None:
        ref_folder = Path("references") / idstr
        ref_folder.mkdir(parents=True, exist_ok=True)
        ref_audios = list_files(
            ref_folder, AUDIO_EXTENSIONS, recursive=True, sort=False
        )

        if req.use_memory_cache == "never" or (
            req.use_memory_cache == "on-demand" and len(prompt_tokens) == 0
        ):
            prompt_tokens = [
                encode_reference(
                    decoder_model=decoder_model,
                    reference_audio=audio_to_bytes(str(ref_audio)),
                    enable_reference_audio=True,
                )
                for ref_audio in ref_audios
            ]
            prompt_texts = [
                read_ref_text(str(ref_audio.with_suffix(".lab")))
                for ref_audio in ref_audios
            ]
        else:
            logger.info("Using cached references")

    else:
        # Parse reference audio aka prompt
        refs = req.references

        if req.use_memory_cache == "never" or (
            req.use_memory_cache == "on-demand" and len(prompt_tokens) == 0
        ):
            prompt_tokens = [
                encode_reference(
                    decoder_model=decoder_model,
                    reference_audio=ref.audio,
                    enable_reference_audio=True,
                )
                for ref in refs
            ]
            prompt_texts = [ref.text for ref in refs]
        else:
            logger.info("Using cached references")

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=(
            req.text
            if not req.normalize
            else ChnNormedText(raw_text=req.text).normalize()
        ),
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=4096,
        prompt_tokens=prompt_tokens,
        prompt_text=prompt_texts,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


async def inference_async(req: ServeTTSRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/tts")
async def api_invoke_model(
    req: Annotated[ServeTTSRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )


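# A minimal client sketch for the endpoint above (illustrative, not part of
# the server): it msgpack-encodes a request with `ormsgpack` and posts it via
# `requests`, assuming both libraries are installed and the server is on the
# default --listen address. The payload keys mirror ServeTTSRequest fields
# used elsewhere in this file; omitted fields rely on the model's defaults.
#
#     import ormsgpack
#     import requests
#
#     payload = {
#         "text": "Hello world.",
#         "references": [],
#         "reference_id": None,
#         "format": "wav",
#         "streaming": False,
#     }
#     response = requests.post(
#         "http://127.0.0.1:8080/v1/tts",
#         data=ormsgpack.packb(payload),
#         headers={"Content-Type": "application/msgpack"},
#     )
#     with open("audio.wav", "wb") as f:
#         f.write(response.content)

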
@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """
    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.4",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8080")
    parser.add_argument("--workers", type=int, default=1)

    return parser.parse_args()
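

# Typical launch (a sketch, assuming the repository root as working directory
# so the `tools.api:app` import string used in the __main__ block resolves):
#
#     python -m tools.api --listen 0.0.0.0:8080 --half --compile
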
# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
        "version": "1.4.2",
    },
).routes


class MsgPackRequest(HttpRequest):
    async def data(
        self,
    ) -> Annotated[
        Any, ContentType("application/msgpack"), ContentType("application/json")
    ]:
        if self.content_type == "application/msgpack":
            return ormsgpack.unpackb(await self.body)

        elif self.content_type == "application/json":
            return await self.json

        raise HTTPException(
            HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
            headers={"Accept": "application/msgpack, application/json"},
        )


app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    factory_class=FactoryClass(http=MsgPackRequest),
    cors_config={},
)


# Each worker process created by Uvicorn has its own memory space,
# meaning that models and variables are not shared between processes.
# Therefore, any global variables (like `llama_queue` or `decoder_model`)
# will not be shared across workers.
# Multi-threading for deep learning can cause issues, such as inconsistent
# outputs if multiple threads access the same buffers simultaneously.
# Instead, it's better to use multiprocessing or independent models per thread.
@app.on_startup
def initialize_app(app: Kui):
    global args, llama_queue, decoder_model, prompt_tokens, prompt_texts

    prompt_tokens, prompt_texts = [], []

    args = parse_args()  # args same as ones in other processes
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )

    logger.info("Llama model loaded, loading VQ-GAN model...")
    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to ensure models work and avoid first-time latency
    list(
        inference(
            ServeTTSRequest(
                text="Hello world.",
                references=[],
                reference_id=None,
                max_new_tokens=0,
                chunk_length=200,
                top_p=0.7,
                repetition_penalty=1.2,
                temperature=0.7,
                emotion=None,
                format="wav",
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")


if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    host, port = args.listen.split(":")

    uvicorn.run(
        "tools.api:app",
        host=host,
        port=int(port),
        workers=args.workers,
        log_level="info",
    )