import base64
import io
import json
import queue
import random
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Literal, Optional

import librosa
import numpy as np
import pyrootutils
import soundfile as sf
import torch
from kui.asgi import (
    Body,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes
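

# Quick sanity check (illustrative, not executed by the server): closing the
# `wave` writer without writing any frames still emits a complete RIFF/WAVE
# preamble with a zero-length data chunk, which is why this header can simply
# be prepended to raw PCM16 frames when streaming.
#
#   header = wav_chunk_header()
#   assert header[:4] == b"RIFF" and header[8:12] == b"WAVE"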


# Define utils for web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


def load_audio(reference_audio, sr):
    if len(reference_audio) > 255 or not Path(reference_audio).exists():
        try:
            audio_data = base64.b64decode(reference_audio)
            reference_audio = io.BytesIO(audio_data)
        except base64.binascii.Error:
            raise ValueError("Invalid path or base64 string")

    audio, _ = librosa.load(reference_audio, sr=sr, mono=True)
    return audio
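

# Usage sketch (hypothetical file name): anything longer than 255 characters,
# or anything that is not an existing path, is treated as base64, so clients
# can inline short reference clips instead of shipping files to the server:
#
#   with open("reference.wav", "rb") as f:
#       encoded = base64.b64encode(f.read()).decode()
#   audio = load_audio(encoded, sr=44100)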


def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content = load_audio(
            reference_audio, decoder_model.spec_transform.sample_rate
        )

        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        logger.info("No reference audio provided")

    return prompt_tokens


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


def get_random_paths(base_path, data, speaker, emotion):
    if base_path and data and speaker and emotion and Path(base_path).exists():
        if speaker in data and emotion in data[speaker]:
            files = data[speaker][emotion]
            lab_files = [f for f in files if f.endswith(".lab")]
            wav_files = [f for f in files if f.endswith(".wav")]

            if lab_files and wav_files:
                selected_lab = random.choice(lab_files)
                selected_wav = random.choice(wav_files)

                lab_path = Path(base_path) / speaker / emotion / selected_lab
                wav_path = Path(base_path) / speaker / emotion / selected_wav
                if lab_path.exists() and wav_path.exists():
                    return lab_path, wav_path

    return None, None
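

# Directory layout assumed by the lookup above (inferred from the path joins;
# file names are illustrative):
#
#   <ref_base>/<speaker>/<emotion>/001.lab   # transcript
#   <ref_base>/<speaker>/<emotion>/001.wav   # matching audio
#
# where the ref json maps speakers to emotions to file lists, e.g.
#   {"speaker_a": {"happy": ["001.lab", "001.wav"]}}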


def load_json(json_file):
    if not json_file:
        logger.info("Not using a json file")
        return None

    try:
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
    except FileNotFoundError:
        logger.warning(f"ref json not found: {json_file}")
        data = None
    except Exception as e:
        logger.warning(f"Loading json failed: {e}")
        data = None

    return data


class InvokeRequest(BaseModel):
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None
    max_new_tokens: int = 1024
    chunk_length: Annotated[int, Field(ge=0, le=500, strict=True)] = 100
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.2
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    emotion: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
    streaming: bool = False
    ref_json: Optional[str] = "ref_data.json"
    ref_base: Optional[str] = "ref_data"
    speaker: Optional[str] = None
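

# Example request body (illustrative values; fields and bounds are defined by
# InvokeRequest above):
#
#   {
#       "text": "Hello world.",
#       "format": "wav",
#       "streaming": false,
#       "max_new_tokens": 512,
#       "top_p": 0.7,
#       "temperature": 0.7
#   }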


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


@torch.inference_mode()
def inference(req: InvokeRequest):
    # Parse reference audio aka prompt
    prompt_tokens = None

    ref_data = load_json(req.ref_json)
    ref_base = req.ref_base

    lab_path, wav_path = get_random_paths(ref_base, ref_data, req.speaker, req.emotion)

    if lab_path and wav_path:
        with open(lab_path, "r", encoding="utf-8") as lab_file:
            ref_text = lab_file.read()

        req.reference_audio = str(wav_path)
        req.reference_text = ref_text
        logger.info("ref_path: " + str(wav_path))
        logger.info("ref_text: " + ref_text)

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=req.reference_audio,
        enable_reference_audio=req.reference_audio is not None,
    )
    logger.info(f"ref_text: {req.reference_text}")

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []
    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with torch.autocast(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


async def inference_async(req: InvokeRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/invoke")
async def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
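

# Client sketch (assumes the server runs on the default --listen address and
# that `requests` is installed; it is not a dependency of this file):
#
#   import requests
#
#   resp = requests.post(
#       "http://127.0.0.1:8000/v1/invoke",
#       json={"text": "Hello world.", "format": "wav"},
#   )
#   with open("audio.wav", "wb") as f:
#       f.write(resp.content)
#
# For streaming, set "streaming": true and pass stream=True to requests.post,
# then write chunks as they arrive; the first chunk is the WAV header from
# wav_chunk_header().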


@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """

    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")
    parser.add_argument("--workers", type=int, default=1)

    return parser.parse_args()
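

# Typical invocation (flags as defined in parse_args above):
#
#   python api.py --listen 0.0.0.0:8000 --half --compile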


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            InvokeRequest(
                text="Hello world.",
                reference_text=None,
                reference_audio=None,
                max_new_tokens=0,
                top_p=0.7,
                repetition_penalty=1.2,
                temperature=0.7,
                emotion=None,
                format="wav",
                ref_base=None,
                ref_json=None,
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    host, port = args.listen.split(":")

    uvicorn.run(app, host=host, port=int(port), workers=args.workers, log_level="info")