import base64
import io
import json
import queue
import random
import threading
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Literal, Optional

import librosa
import numpy as np
import pyrootutils
import soundfile as sf
import torch
from kui.wsgi import (
    Body,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.wsgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field
from transformers import AutoTokenizer

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

from fish_speech.models.vits_decoder.lit_module import VITSDecoder
from fish_speech.models.vqgan.lit_module import VQGAN
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


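# In streaming mode we send this standalone WAV header first, then raw 16-bit
# PCM frames as they are produced. Closing the empty wave writer finalizes a
# header that declares zero data length, so streaming clients must read until
# EOF rather than trust the length fields. Note the defaults here assume the
# decoder outputs 44.1 kHz mono 16-bit audio.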
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


# Define utils for web server
def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


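# encode_reference turns an optional reference clip into conditioning inputs:
# VQ prompt tokens for the LLAMA stage and, for VITSDecoder, a reference
# (speaker) embedding; a zero embedding is used when no clip is provided.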
def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content, _ = librosa.load(
            reference_audio, sr=decoder_model.sampling_rate, mono=True
        )
        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.sampling_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, VQGAN):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]
            reference_embedding = None  # VQGAN does not have reference embedding
        elif isinstance(decoder_model, VITSDecoder):
            reference_spec = decoder_model.spec_transform(audios[0])
            reference_embedding = decoder_model.generator.encode_ref(
                reference_spec,
                torch.tensor([reference_spec.shape[-1]], device=decoder_model.device),
            )
            logger.info(f"Loaded reference audio from {reference_audio}")
            prompt_tokens = decoder_model.generator.vq.encode(audios, audio_lengths)[0][
                0
            ]
        else:
            raise ValueError(f"Unknown model type: {type(decoder_model)}")

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    elif isinstance(decoder_model, VITSDecoder):
        prompt_tokens = None
        reference_embedding = torch.zeros(
            1, decoder_model.generator.gin_channels, 1, device=decoder_model.device
        )
        logger.info("No reference audio provided, use zero embedding")
    else:
        prompt_tokens = None
        reference_embedding = None
        logger.info("No reference audio provided")

    return prompt_tokens, reference_embedding


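# decode_vq_tokens maps generated VQ codes back to a waveform: VQGAN decodes
# the indices directly, while VITSDecoder first restores VQ features from the
# indices and then decodes them conditioned on the text tokens and the
# reference embedding.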
def decode_vq_tokens(
    *,
    decoder_model,
    codes,
    text_tokens: torch.Tensor | None = None,
    reference_embedding: torch.Tensor | None = None,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, VQGAN):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
            return_audios=True,
        ).squeeze()

    if isinstance(decoder_model, VITSDecoder):
        # VITS Inference
        quantized = decoder_model.generator.vq.indicies_to_vq_features(
            indices=codes[None], feature_lengths=feature_lengths
        )
        logger.info(f"Restored VQ features: {quantized.shape}")

        return decoder_model.generator.decode(
            quantized,
            torch.tensor([quantized.shape[-1]], device=decoder_model.device),
            text_tokens,
            torch.tensor([text_tokens.shape[-1]], device=decoder_model.device),
            ge=reference_embedding,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


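# get_random_paths picks a random (.lab transcript, .wav audio) pair for the
# requested speaker/emotion under base_path, using the index loaded from the
# ref json. The .lab and .wav files are chosen independently, so they are not
# guaranteed to belong to the same utterance.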
def get_random_paths(base_path, data, speaker, emotion):
    if base_path and data and speaker and emotion and (Path(base_path).exists()):
        if speaker in data and emotion in data[speaker]:
            files = data[speaker][emotion]
            lab_files = [f for f in files if f.endswith(".lab")]
            wav_files = [f for f in files if f.endswith(".wav")]

            if lab_files and wav_files:
                selected_lab = random.choice(lab_files)
                selected_wav = random.choice(wav_files)

                lab_path = Path(base_path) / speaker / emotion / selected_lab
                wav_path = Path(base_path) / speaker / emotion / selected_wav
                if lab_path.exists() and wav_path.exists():
                    return lab_path, wav_path

    return None, None


def load_json(json_file):
    if not json_file:
        logger.info("Not using a json file")
        return None

    try:
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
    except FileNotFoundError:
        logger.warning(f"ref json not found: {json_file}")
        data = None
    except Exception as e:
        logger.warning(f"Loading json failed: {e}")
        data = None

    return data


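# Request schema for /v1/invoke. reference_audio carries base64-encoded audio
# bytes; alternatively, ref_json/ref_base let the server pick a reference by
# speaker and emotion.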
class InvokeRequest(BaseModel):
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None
    max_new_tokens: int = 0
    chunk_length: Annotated[int, Field(ge=0, le=500, strict=True)] = 150
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.5
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    speaker: Optional[str] = None
    emotion: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
    streaming: bool = False
    ref_json: Optional[str] = "ref_data.json"
    ref_base: Optional[str] = "ref_data"


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


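# inference is a generator: in streaming mode it yields a WAV header followed
# by raw int16 PCM chunks as segments are generated; otherwise it buffers all
# segments and yields a single concatenated float waveform.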
@torch.inference_mode()
def inference(req: InvokeRequest):
    # Pick a random reference from ref_json/ref_base if speaker/emotion match
    ref_data = load_json(req.ref_json)
    ref_base = req.ref_base

    lab_path, wav_path = get_random_paths(ref_base, ref_data, req.speaker, req.emotion)

    if lab_path and wav_path:
        with open(wav_path, "rb") as wav_file:
            audio_bytes = wav_file.read()
        with open(lab_path, "r", encoding="utf-8") as lab_file:
            ref_text = lab_file.read()

        req.reference_audio = base64.b64encode(audio_bytes).decode("utf-8")
        req.reference_text = ref_text
        logger.info("ref_path: " + str(wav_path))
        logger.info("ref_text: " + ref_text)

    # Parse reference audio aka prompt
    prompt_tokens, reference_embedding = encode_reference(
        decoder_model=decoder_model,
        reference_audio=(
            io.BytesIO(base64.b64decode(req.reference_audio))
            if req.reference_audio is not None
            else None
        ),
        enable_reference_audio=req.reference_audio is not None,
    )

    # LLAMA Inference
    request = dict(
        tokenizer=llama_tokenizer,
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=args.max_length,
        speaker=req.speaker,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []
    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        text_tokens = llama_tokenizer.encode(result.text, return_tensors="pt").to(
            decoder_model.device
        )

        with torch.autocast(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
                text_tokens=text_tokens,
                reference_embedding=reference_embedding,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


@routes.http.post("/v1/invoke")
def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    generator = inference(req)
    if req.streaming:
        return StreamResponse(
            iterable=generator,
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(generator)
        buffer = io.BytesIO()
        sf.write(buffer, fake_audios, decoder_model.sampling_rate, format=req.format)

        return StreamResponse(
            iterable=[buffer.getvalue()],
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )


@routes.http.post("/v1/health")
def api_health():
    """
    Health check
    """

    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/text2semantic-sft-medium-v1-4k.pth",
    )
    parser.add_argument(
        "--llama-config-name", type=str, default="dual_ar_2_codebook_medium"
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/vq-gan-group-fsq-2x1024.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="vqgan_pretrain")
    parser.add_argument("--tokenizer", type=str, default="fishaudio/fish-speech-1")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--max-length", type=int, default=2048)
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")

    return parser.parse_args()


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    from zibai import create_bind_socket, serve

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        config_name=args.llama_config_name,
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        max_length=args.max_length,
        compile=args.compile,
    )
    llama_tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            InvokeRequest(
                text="A warm-up sentence.",
                reference_text=None,
                reference_audio=None,
                max_new_tokens=0,
                chunk_length=150,
                top_p=0.7,
                repetition_penalty=1.5,
                temperature=0.7,
                speaker=None,
                emotion=None,
                format="wav",
                ref_base=None,
                ref_json=None,
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    sock = create_bind_socket(args.listen)
    sock.listen()

    # Start server
    serve(
        app=app,
        bind_sockets=[sock],
        max_workers=10,
        graceful_exit=threading.Event(),
    )
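
# Example client call (a minimal sketch, assuming the default
# --listen 127.0.0.1:8000 and the third-party `requests` package):
#
#   import requests
#
#   response = requests.post(
#       "http://127.0.0.1:8000/v1/invoke",
#       json={"text": "Hello world", "format": "wav", "streaming": False},
#   )
#   with open("audio.wav", "wb") as f:
#       f.write(response.content)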