# api.py


import io
import queue
import sys
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Any

import numpy as np
import ormsgpack
import pyrootutils
import soundfile as sf
import torch
import torchaudio
from baize.datastructures import ContentType
from kui.asgi import (
    Body,
    FactoryClass,
    HTTPException,
    HttpRequest,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from fish_speech.utils import autocast_exclude_mps
from tools.commons import ServeTTSRequest
from tools.file import AUDIO_EXTENSIONS, audio_to_bytes, list_files, read_ref_text
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes
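

# wav_chunk_header() writes zero frames, so the returned bytes are just the
# RIFF/WAVE header (with a declared data length of 0). For streaming responses
# it is sent once up front, and most players keep decoding the raw PCM that
# follows until the connection closes.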


# Define utils for web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: "Exception"):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


def load_audio(reference_audio, sr):
    # Anything too long to be a filesystem path, or that does not point to an
    # existing file, is treated as raw in-memory audio bytes.
    if len(reference_audio) > 255 or not Path(reference_audio).exists():
        audio_data = reference_audio
        reference_audio = io.BytesIO(audio_data)

    waveform, original_sr = torchaudio.load(
        reference_audio, backend="ffmpeg" if sys.platform == "linux" else "soundfile"
    )

    # Downmix multi-channel audio to mono
    if waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    # Resample to the decoder's expected sample rate
    if original_sr != sr:
        resampler = torchaudio.transforms.Resample(orig_freq=original_sr, new_freq=sr)
        waveform = resampler(waveform)

    audio = waveform.squeeze().numpy()
    return audio


def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content = load_audio(
            reference_audio, decoder_model.spec_transform.sample_rate
        )
        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        logger.info("No reference audio provided")

    return prompt_tokens


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        )[0].squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")
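

# decode_vq_tokens() returns a 1-D float waveform tensor sampled at
# decoder_model.spec_transform.sample_rate; callers convert it to int16 PCM
# for streaming, or hand it to soundfile for one-shot encoding.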


routes = MultimethodRoutes(base_class=HttpView)


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


@torch.inference_mode()
def inference(req: ServeTTSRequest):
    idstr: str | None = req.reference_id
    if idstr is not None:
        ref_folder = Path("references") / idstr
        ref_folder.mkdir(parents=True, exist_ok=True)
        ref_audios = list_files(
            ref_folder, AUDIO_EXTENSIONS, recursive=True, sort=False
        )
        prompt_tokens = [
            encode_reference(
                decoder_model=decoder_model,
                reference_audio=audio_to_bytes(str(ref_audio)),
                enable_reference_audio=True,
            )
            for ref_audio in ref_audios
        ]
        prompt_texts = [
            read_ref_text(str(ref_audio.with_suffix(".lab")))
            for ref_audio in ref_audios
        ]
    else:
        # Parse reference audio aka prompt
        refs = req.references
        if refs is None:
            refs = []
        prompt_tokens = [
            encode_reference(
                decoder_model=decoder_model,
                reference_audio=ref.audio,
                enable_reference_audio=True,
            )
            for ref in refs
        ]
        prompt_texts = [ref.text for ref in refs]

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=(
            req.text
            if not req.normalize
            else ChnNormedText(raw_text=req.text).normalize()
        ),
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=4096,
        prompt_tokens=prompt_tokens,
        prompt_text=prompt_texts,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios
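

# inference() is a generator: with req.streaming set it yields a WAV header
# followed by raw int16 PCM chunks, otherwise a single float32 numpy array
# containing the full concatenated audio.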


async def inference_async(req: ServeTTSRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/tts")
async def api_invoke_model(
    req: Annotated[ServeTTSRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )


@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """
    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.4",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8080")
    parser.add_argument("--workers", type=int, default=1)

    return parser.parse_args()
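

# Example launch (paths are the defaults above; adjust to your checkout):
#   python tools/api.py --listen 0.0.0.0:8080 --half --compile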


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
        "version": "1.4.2",
    },
).routes


class MsgPackRequest(HttpRequest):
    async def data(
        self,
    ) -> Annotated[
        Any, ContentType("application/msgpack"), ContentType("application/json")
    ]:
        if self.content_type == "application/msgpack":
            return ormsgpack.unpackb(await self.body)
        elif self.content_type == "application/json":
            return await self.json

        raise HTTPException(
            HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
            headers={"Accept": "application/msgpack, application/json"},
        )


app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    factory_class=FactoryClass(http=MsgPackRequest),
    cors_config={},
)


if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            ServeTTSRequest(
                text="Hello world.",
                references=[],
                reference_id=None,
                max_new_tokens=0,
                chunk_length=200,
                top_p=0.7,
                repetition_penalty=1.2,
                temperature=0.7,
                emotion=None,
                format="wav",
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")

    host, port = args.listen.split(":")
    uvicorn.run(app, host=host, port=int(port), workers=args.workers, log_level="info")
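

# Example client (a minimal sketch, not part of the server). Assumes `httpx`
# is installed, the server is listening on the default 127.0.0.1:8080, and
# that fields omitted from the payload fall back to ServeTTSRequest defaults:
#
#   import httpx
#   import ormsgpack
#
#   payload = {"text": "Hello world.", "format": "wav", "streaming": False}
#   resp = httpx.post(
#       "http://127.0.0.1:8080/v1/tts",
#       content=ormsgpack.packb(payload),
#       headers={"Content-Type": "application/msgpack"},
#       timeout=None,
#   )
#   resp.raise_for_status()
#   with open("audio.wav", "wb") as f:
#       f.write(resp.content)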