api.py

import base64
import io
import threading
import traceback
from argparse import ArgumentParser
from http import HTTPStatus
from threading import Lock
from typing import Annotated, Literal, Optional

import librosa
import soundfile as sf
import torch
from kui.wsgi import (
    Body,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.wsgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel
from transformers import AutoTokenizer

from tools.llama.generate import launch_thread_safe_queue
from tools.vqgan.inference import load_model as load_vqgan_model

# Serialize access to the models: only one inference may run at a time
lock = Lock()


# Define utils for web server
def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


def other_exception_handler(exc: Exception):
    traceback.print_exc()
    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


routes = MultimethodRoutes(base_class=HttpView)


class InvokeRequest(BaseModel):
    # Default sample text (Chinese): "You're right, but Genshin Impact is an
    # open-world game independently developed by miHoYo."
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None  # base64-encoded prompt audio
    max_new_tokens: int = 0
    chunk_length: int = 30
    top_k: int = 0
    top_p: float = 0.7
    repetition_penalty: float = 1.5
    temperature: float = 0.7
    speaker: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
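
# A minimal request body for /v1/invoke might look like this (a sketch; every
# field is optional because the model above provides defaults):
#
#   {
#       "text": "Hello world",
#       "format": "wav",
#       "top_p": 0.7,
#       "temperature": 0.7
#   }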


def inference(req: InvokeRequest):
    # Parse reference audio aka prompt
    prompt_tokens = None
    if req.reference_audio is not None:
        buffer = io.BytesIO(base64.b64decode(req.reference_audio))
        reference_audio_content, _ = librosa.load(
            buffer, sr=vqgan_model.sampling_rate, mono=True
        )
        audios = torch.from_numpy(reference_audio_content).to(vqgan_model.device)[
            None, None, :
        ]
        logger.info(
            f"Loaded audio with {audios.shape[2] / vqgan_model.sampling_rate:.2f} seconds"
        )

        # VQ Encoder
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=vqgan_model.device, dtype=torch.long
        )
        prompt_tokens = vqgan_model.encode(audios, audio_lengths)[0][0]

    # LLAMA Inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=vqgan_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_k=int(req.top_k) if req.top_k > 0 else None,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=args.max_length,
        speaker=req.speaker,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    payload = dict(
        event=threading.Event(),
        request=request,
    )
    llama_queue.put(payload)

    # Wait for the result
    payload["event"].wait()
    if payload["success"] is False:
        raise payload["response"]

    codes = payload["response"][0]

    # VQGAN Inference
    feature_lengths = torch.tensor([codes.shape[1]], device=vqgan_model.device)
    fake_audios = vqgan_model.decode(
        indices=codes[None], feature_lengths=feature_lengths, return_audios=True
    )[0, 0]
    fake_audios = fake_audios.float().cpu().numpy()

    return fake_audios


@routes.http.post("/v1/invoke")
def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """
    if args.max_gradio_length > 0 and len(req.text) > args.max_gradio_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_gradio_length}",
        )

    try:
        # Hold the lock so concurrent requests cannot interrupt the inference process
        with lock:
            fake_audios = inference(req)
    except Exception as e:
        traceback.print_exc()
        raise HTTPException(HTTPStatus.INTERNAL_SERVER_ERROR, content=str(e))

    buffer = io.BytesIO()
    sf.write(buffer, fake_audios, vqgan_model.sampling_rate, format=req.format)

    return StreamResponse(
        iterable=[buffer.getvalue()],
        headers={
            "Content-Disposition": f"attachment; filename=audio.{req.format}",
        },
        # Make swagger-ui happy
        # content_type=f"audio/{req.format}",
        content_type="application/octet-stream",
    )
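
# Example client call (a sketch, not part of the server; assumes the default
# --listen address and the third-party `requests` package):
#
#   import requests
#
#   resp = requests.post(
#       "http://127.0.0.1:8000/v1/invoke",
#       json={"text": "Hello world", "format": "wav"},
#   )
#   resp.raise_for_status()
#   with open("audio.wav", "wb") as f:
#       f.write(resp.content)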


@routes.http.post("/v1/health")
def api_health():
    """
    Health check
    """
    return JSONResponse({"status": "ok"})
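
# Note the health check is also a POST route, e.g.:
#   curl -X POST http://127.0.0.1:8000/v1/health
# which should return {"status": "ok"}.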


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/text2semantic-medium-v1-2k.pth",
    )
    parser.add_argument(
        "--llama-config-name", type=str, default="dual_ar_2_codebook_medium"
    )
    parser.add_argument(
        "--vqgan-checkpoint-path",
        type=str,
        default="checkpoints/vq-gan-group-fsq-2x1024.pth",
    )
    parser.add_argument("--vqgan-config-name", type=str, default="vqgan_pretrain")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--max-length", type=int, default=2048)
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-gradio-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")
    return parser.parse_args()
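
# Example launch (a sketch; the flags mirror the argparse options above):
#   python api.py --listen 0.0.0.0:8000 --half --compile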


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    from zibai import create_bind_socket, serve

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        config_name=args.llama_config_name,
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        max_length=args.max_length,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    vqgan_model = load_vqgan_model(
        config_name=args.vqgan_config_name,
        checkpoint_path=args.vqgan_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    inference(
        InvokeRequest(
            text="A warm-up sentence.",
            reference_text=None,
            reference_audio=None,
            max_new_tokens=0,
            chunk_length=30,
            top_k=0,
            top_p=0.7,
            repetition_penalty=1.5,
            temperature=0.7,
            speaker=None,
            format="wav",
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    sock = create_bind_socket(args.listen)
    sock.listen()

    # Start server
    serve(
        app=app,
        bind_sockets=[sock],
        max_workers=10,
        graceful_exit=threading.Event(),
    )