api.py
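
# HTTP inference server for fish-speech: a thread-safe LLaMA text-to-semantic
# queue feeds a VQGAN (or VITS) decoder, exposed through Kui at /v1/invoke
# (audio generation, optionally streamed) and /v1/health.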

import base64
import io
import queue
import threading
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from typing import Annotated, Literal, Optional

import librosa
import numpy as np
import pyrootutils
import soundfile as sf
import torch
from kui.wsgi import (
    Body,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.wsgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.models.vits_decoder.lit_module import VITSDecoder
from fish_speech.models.vqgan.lit_module import VQGAN
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model
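

# `wav_chunk_header` below writes zero audio frames, so the buffer ends up
# holding only the RIFF/WAV header. In streaming mode the server sends this
# header once, then appends raw 16-bit PCM chunks as they are generated.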
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


# Define utils for web server
def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )
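

# Both helpers below dispatch on the decoder type: a VQGAN decoder only needs
# VQ prompt tokens, while a VITS decoder additionally consumes a speaker
# reference embedding (a zero embedding when no reference audio is supplied).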
def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content, _ = librosa.load(
            reference_audio, sr=decoder_model.sampling_rate, mono=True
        )
        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.sampling_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, VQGAN):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]
            reference_embedding = None  # VQGAN does not have reference embedding
        elif isinstance(decoder_model, VITSDecoder):
            reference_spec = decoder_model.spec_transform(audios[0])
            reference_embedding = decoder_model.generator.encode_ref(
                reference_spec,
                torch.tensor([reference_spec.shape[-1]], device=decoder_model.device),
            )
            logger.info(f"Loaded reference audio from {reference_audio}")

            audio_lengths = torch.tensor(
                [audios.shape[-1]], device=decoder_model.device, dtype=torch.long
            )
            prompt_tokens = decoder_model.generator.vq.encode(
                audios, audio_lengths
            )[0][0]
        else:
            raise ValueError(f"Unknown model type: {type(decoder_model)}")

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    elif isinstance(decoder_model, VITSDecoder):
        prompt_tokens = None
        reference_embedding = torch.zeros(
            1, decoder_model.generator.gin_channels, 1, device=decoder_model.device
        )
        logger.info("No reference audio provided, use zero embedding")
    else:
        prompt_tokens = None
        reference_embedding = None
        logger.info("No reference audio provided")

    return prompt_tokens, reference_embedding


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
    text_tokens: torch.Tensor | None = None,
    reference_embedding: torch.Tensor | None = None,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, VQGAN):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
            return_audios=True,
        ).squeeze()

    if isinstance(decoder_model, VITSDecoder):
        # VITS Inference
        quantized = decoder_model.generator.vq.indicies_to_vq_features(
            indices=codes[None], feature_lengths=feature_lengths
        )
        logger.info(f"Restored VQ features: {quantized.shape}")

        return decoder_model.generator.decode(
            quantized,
            torch.tensor([quantized.shape[-1]], device=decoder_model.device),
            text_tokens,
            torch.tensor([text_tokens.shape[-1]], device=decoder_model.device),
            ge=reference_embedding,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


class InvokeRequest(BaseModel):
    # Default demo sentence (Chinese): "You are right, but Genshin Impact is an
    # open-world mobile game independently developed by miHoYo."
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None
    max_new_tokens: int = 0
    chunk_length: Annotated[int, Field(ge=0, le=200, strict=True)] = 30
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.5
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    speaker: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
    streaming: bool = False
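
# Note: `reference_audio` carries the bytes of an audio file encoded as base64
# (the server decodes it with base64.b64decode before loading). A client-side
# sketch for filling the field:
#
#     with open("reference.wav", "rb") as f:
#         reference_audio = base64.b64encode(f.read()).decode()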


@torch.inference_mode()
def inference(req: InvokeRequest):
    # Parse reference audio aka prompt
    prompt_tokens, reference_embedding = encode_reference(
        decoder_model=decoder_model,
        reference_audio=(
            io.BytesIO(base64.b64decode(req.reference_audio))
            if req.reference_audio is not None
            else None
        ),
        enable_reference_audio=req.reference_audio is not None,
    )

    # LLAMA Inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=args.max_length,
        speaker=req.speaker,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []
    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        text_tokens = llama_tokenizer.encode(result.text, return_tensors="pt").to(
            decoder_model.device
        )

        with torch.autocast(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
                text_tokens=text_tokens,
                reference_embedding=reference_embedding,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios
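

# `inference` is a generator: in streaming mode it yields a WAV header followed
# by int16 PCM byte chunks; otherwise it yields a single float32 numpy array
# with the concatenated audio, which the route below serializes via soundfile.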


@routes.http.post("/v1/invoke")
def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    generator = inference(req)
    if req.streaming:
        return StreamResponse(
            iterable=generator,
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type="application/octet-stream",
        )
    else:
        fake_audios = next(generator)
        buffer = io.BytesIO()
        sf.write(buffer, fake_audios, decoder_model.sampling_rate, format=req.format)

        return StreamResponse(
            iterable=[buffer.getvalue()],
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type="application/octet-stream",
        )
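

# Example client call (a minimal sketch; `requests` is not a dependency of this
# module, and the URL assumes the default --listen address):
#
#     import requests
#
#     resp = requests.post(
#         "http://127.0.0.1:8000/v1/invoke",
#         json={"text": "Hello world", "format": "wav"},
#     )
#     with open("audio.wav", "wb") as f:
#         f.write(resp.content)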


@routes.http.post("/v1/health")
def api_health():
    """
    Health check
    """

    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/text2semantic-sft-medium-v1-4k.pth",
    )
    parser.add_argument(
        "--llama-config-name", type=str, default="dual_ar_2_codebook_medium"
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/vq-gan-group-fsq-2x1024.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="vqgan_pretrain")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--max-length", type=int, default=2048)
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")

    return parser.parse_args()
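
# Launch sketch (assuming the default checkpoints are present under
# checkpoints/; the script name is illustrative):
#
#     python api.py --listen 127.0.0.1:8000 --half --compile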


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    from zibai import create_bind_socket, serve

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        config_name=args.llama_config_name,
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        max_length=args.max_length,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            InvokeRequest(
                text="A warm-up sentence.",
                reference_text=None,
                reference_audio=None,
                max_new_tokens=0,
                chunk_length=30,
                top_p=0.7,
                repetition_penalty=1.5,
                temperature=0.7,
                speaker=None,
                format="wav",
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    sock = create_bind_socket(args.listen)
    sock.listen()

    # Start server
    serve(
        app=app,
        bind_sockets=[sock],
        max_workers=10,
        graceful_exit=threading.Event(),
    )