import base64
import io
import json
import queue
import random
import threading
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Literal, Optional

import librosa
import numpy as np
import pyrootutils
import soundfile as sf
import torch
from kui.asgi import (
    Body,
    FileResponse,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    # Closing the wave writer finalizes the RIFF/WAVE header in the buffer
    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes
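
# Streaming responses emit this header once, followed by raw little-endian
# int16 PCM frames (see `inference` below), so a client can write the chunks
# to disk as-is and obtain a playable WAV file. Quick sanity check
# (illustrative):
#
#   header = wav_chunk_header()
#   assert header[:4] == b"RIFF" and header[8:12] == b"WAVE"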


# Define utils for web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: "Exception"):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load the audio and resample it to the decoder's sample rate
        reference_audio_content, _ = librosa.load(
            reference_audio, sr=decoder_model.spec_transform.sample_rate, mono=True
        )
        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]
            reference_embedding = None  # VQGAN does not have reference embedding
        else:
            raise ValueError(f"Unknown model type: {type(decoder_model)}")

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        reference_embedding = None
        logger.info("No reference audio provided")

    return prompt_tokens, reference_embedding


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
    text_tokens: torch.Tensor | None = None,
    reference_embedding: torch.Tensor | None = None,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


def get_random_paths(base_path, data, speaker, emotion):
    if base_path and data and speaker and emotion and Path(base_path).exists():
        if speaker in data and emotion in data[speaker]:
            files = data[speaker][emotion]
            lab_files = [f for f in files if f.endswith(".lab")]
            wav_files = [f for f in files if f.endswith(".wav")]

            if lab_files and wav_files:
                selected_lab = random.choice(lab_files)
                selected_wav = random.choice(wav_files)

                lab_path = Path(base_path) / speaker / emotion / selected_lab
                wav_path = Path(base_path) / speaker / emotion / selected_wav
                if lab_path.exists() and wav_path.exists():
                    return lab_path, wav_path

    return None, None
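
# `get_random_paths` expects the ref JSON (loaded by `load_json` below) to map
# speaker -> emotion -> a flat list of file names, e.g. (illustrative layout):
#
#   {
#       "speaker_a": {
#           "happy": ["001.lab", "001.wav", "002.lab", "002.wav"]
#       }
#   }
#
# Each .lab file is a transcript living next to the .wav files under
# <ref_base>/<speaker>/<emotion>/. Note that the .lab and .wav files are
# sampled independently, so pairing is only guaranteed when a list holds a
# single pair.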


def load_json(json_file):
    if not json_file:
        logger.info("Not using a json file")
        return None

    try:
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
    except FileNotFoundError:
        logger.warning(f"ref json not found: {json_file}")
        data = None
    except Exception as e:
        logger.warning(f"Loading json failed: {e}")
        data = None

    return data


class InvokeRequest(BaseModel):
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None
    max_new_tokens: int = 0
    chunk_length: Annotated[int, Field(ge=0, le=500, strict=True)] = 150
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.5
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    speaker: Optional[str] = None
    emotion: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
    streaming: bool = False
    ref_json: Optional[str] = "ref_data.json"
    ref_base: Optional[str] = "ref_data"


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


@torch.inference_mode()
def inference(req: InvokeRequest):
    # Pick a random reference (.lab transcript + .wav audio) for the requested
    # speaker/emotion, if a ref JSON is configured
    ref_data = load_json(req.ref_json)
    ref_base = req.ref_base

    lab_path, wav_path = get_random_paths(ref_base, ref_data, req.speaker, req.emotion)

    if lab_path and wav_path:
        with open(wav_path, "rb") as wav_file:
            audio_bytes = wav_file.read()
        with open(lab_path, "r", encoding="utf-8") as lab_file:
            ref_text = lab_file.read()

        req.reference_audio = base64.b64encode(audio_bytes).decode("utf-8")
        req.reference_text = ref_text
        logger.info("ref_path: " + str(wav_path))
        logger.info("ref_text: " + ref_text)

    # Parse reference audio aka prompt
    prompt_tokens, reference_embedding = encode_reference(
        decoder_model=decoder_model,
        reference_audio=(
            io.BytesIO(base64.b64decode(req.reference_audio))
            if req.reference_audio is not None
            else None
        ),
        enable_reference_audio=req.reference_audio is not None,
    )

    # LLAMA Inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=2048,
        speaker=req.speaker,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []
    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        text_tokens = llama_tokenizer.encode(result.text, return_tensors="pt").to(
            decoder_model.device
        )

        with torch.autocast(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
                text_tokens=text_tokens,
                reference_embedding=reference_embedding,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            # Stream raw int16 PCM; the WAV header was already sent above
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


async def inference_async(req: InvokeRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/invoke")
async def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
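
# Example client call (a sketch: assumes the server runs on the default
# --listen address and uses the third-party `requests` package):
#
#   import requests
#
#   resp = requests.post(
#       "http://127.0.0.1:8000/v1/invoke",
#       json={"text": "Hello world", "format": "wav", "streaming": False},
#   )
#   with open("audio.wav", "wb") as f:
#       f.write(resp.content)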


@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """

    return JSONResponse({"status": "ok"})
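
# Note the health check is registered as POST rather than GET, e.g.:
#
#   curl -X POST http://127.0.0.1:8000/v1/health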


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")
    parser.add_argument("--workers", type=int, default=1)

    return parser.parse_args()
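
# Example launch (a sketch; all flags are defined above, and the checkpoint
# paths assume the default layout):
#
#   python api.py --listen 0.0.0.0:8000 --half --compile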


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            InvokeRequest(
                text="A warm-up sentence.",
                reference_text=None,
                reference_audio=None,
                max_new_tokens=0,
                chunk_length=150,
                top_p=0.7,
                repetition_penalty=1.5,
                temperature=0.7,
                speaker=None,
                emotion=None,
                format="wav",
                ref_base=None,
                ref_json=None,
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    host, port = args.listen.split(":")

    uvicorn.run(app, host=host, port=int(port), workers=args.workers, log_level="info")