import base64
import io
import json
import queue
import random
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Literal, Optional

import librosa
import numpy as np
import pyrootutils
import soundfile as sf
import torch
from kui.asgi import (
    Body,
    HTTPException,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from fish_speech.utils import autocast_exclude_mps
from tools.auto_rerank import batch_asr, calculate_wer, is_chinese, load_model
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
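    """Return the bytes of a WAV file header with no audio frames.

    Closing the `wave` writer flushes a valid RIFF/WAVE header into the
    buffer; in streaming mode this header is sent once, followed by raw
    16-bit PCM chunks.
    """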
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


# Define utils for the web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )


def load_audio(reference_audio, sr):
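    """Load reference audio from a file path or a base64-encoded string.

    Anything longer than 255 characters, or that does not resolve to an
    existing file, is treated as base64-encoded audio bytes.
    """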
    if len(reference_audio) > 255 or not Path(reference_audio).exists():
        try:
            audio_data = base64.b64decode(reference_audio)
            reference_audio = io.BytesIO(audio_data)
        except base64.binascii.Error:
            raise ValueError("Invalid path or base64 string")

    audio, _ = librosa.load(reference_audio, sr=sr, mono=True)
    return audio


def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
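    """Encode reference audio into VQ prompt tokens for voice cloning,
    or return None when no reference is provided."""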
    if enable_reference_audio and reference_audio is not None:
        # Load audio and prepare basic info here
        reference_audio_content = load_audio(
            reference_audio, decoder_model.spec_transform.sample_rate
        )
        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]
        else:
            # Guard against prompt_tokens being unbound below
            raise ValueError(f"Unknown model type: {type(decoder_model)}")

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        logger.info("No reference audio provided")

    return prompt_tokens


def decode_vq_tokens(
    *,
    decoder_model,
    codes,
):
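    """Decode generated VQ token indices back into a waveform."""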
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


def get_random_paths(base_path, data, speaker, emotion):
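    """Pick a random (.lab transcript, .wav audio) pair for the given
    speaker and emotion from the reference index, or (None, None)."""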
    if base_path and data and speaker and emotion and (Path(base_path).exists()):
        if speaker in data and emotion in data[speaker]:
            files = data[speaker][emotion]
            lab_files = [f for f in files if f.endswith(".lab")]
            wav_files = [f for f in files if f.endswith(".wav")]

            if lab_files and wav_files:
                selected_lab = random.choice(lab_files)
                selected_wav = random.choice(wav_files)

                lab_path = Path(base_path) / speaker / emotion / selected_lab
                wav_path = Path(base_path) / speaker / emotion / selected_wav

                if lab_path.exists() and wav_path.exists():
                    return lab_path, wav_path

    return None, None


def load_json(json_file):
    if not json_file:
        logger.info("Not using a json file")
        return None

    try:
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
    except FileNotFoundError:
        logger.warning(f"ref json not found: {json_file}")
        data = None
    except Exception as e:
        logger.warning(f"Loading json failed: {e}")
        data = None

    return data


class InvokeRequest(BaseModel):
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    reference_text: Optional[str] = None
    reference_audio: Optional[str] = None
    max_new_tokens: int = 1024
    chunk_length: Annotated[int, Field(ge=0, le=500, strict=True)] = 100
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.2
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    emotion: Optional[str] = None
    format: Literal["wav", "mp3", "flac"] = "wav"
    streaming: bool = False
    ref_json: Optional[str] = "ref_data.json"
    ref_base: Optional[str] = "ref_data"
    speaker: Optional[str] = None
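
# A minimal example request body for /v1/invoke (values are illustrative;
# `reference_audio` may be a file path or a base64-encoded string):
#
# {
#     "text": "Hello world.",
#     "reference_audio": "ref_data/speaker/neutral/sample.wav",
#     "reference_text": "Transcript of the reference audio.",
#     "format": "wav",
#     "streaming": false
# }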


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"


@torch.inference_mode()
def inference(req: InvokeRequest):
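    """Generate audio for a request.

    A generator: in streaming mode it yields a WAV header followed by raw
    int16 PCM chunks; otherwise it yields a single concatenated float
    numpy array.
    """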
    # Pick a random reference (transcript + audio) from the index, if configured
    prompt_tokens = None

    ref_data = load_json(req.ref_json)
    ref_base = req.ref_base

    lab_path, wav_path = get_random_paths(ref_base, ref_data, req.speaker, req.emotion)

    if lab_path and wav_path:
        with open(lab_path, "r", encoding="utf-8") as lab_file:
            ref_text = lab_file.read()
        req.reference_audio = wav_path
        req.reference_text = ref_text
        logger.info("ref_path: " + str(wav_path))
        logger.info("ref_text: " + ref_text)

    # Parse reference audio aka prompt
    prompt_tokens = encode_reference(
        decoder_model=decoder_model,
        reference_audio=req.reference_audio,
        enable_reference_audio=req.reference_audio is not None,
    )
    logger.info(f"ref_text: {req.reference_text}")

    # LLAMA inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=req.text,
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens,
        prompt_text=req.reference_text,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        # Send a WAV header (44100 Hz / 16-bit / mono by default) before the
        # raw PCM chunks
        yield wav_chunk_header()

    segments = []
    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


def auto_rerank_inference(req: InvokeRequest, use_auto_rerank: bool = True):
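    """Run inference up to five times, transcribe each candidate with ASR,
    and return the first one with WER <= 0.1 and no huge gap; otherwise
    return the attempt with the lowest WER."""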
    if not use_auto_rerank:
        # Without auto-rerank, call the original inference function directly
        return inference(req)

    zh_model, en_model = load_model()
    max_attempts = 5
    best_wer = float("inf")
    best_audio = None

    for attempt in range(max_attempts):
        # Call the original inference function
        audio_generator = inference(req)
        fake_audios = next(audio_generator)

        asr_result = batch_asr(
            zh_model if is_chinese(req.text) else en_model, [fake_audios], 44100
        )[0]
        wer = calculate_wer(req.text, asr_result["text"])

        if wer <= 0.1 and not asr_result["huge_gap"]:
            return fake_audios

        if wer < best_wer:
            best_wer = wer
            best_audio = fake_audios

        if attempt == max_attempts - 1:
            break

    return best_audio


async def inference_async(req: InvokeRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/invoke")
async def api_invoke_model(
    req: Annotated[InvokeRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
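
# Example call (illustrative; adjust host/port to your --listen value):
#
#   curl -X POST http://127.0.0.1:8000/v1/invoke \
#       -H "Content-Type: application/json" \
#       -d '{"text": "Hello world.", "format": "wav"}' \
#       --output audio.wav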


@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """
    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8000")
    parser.add_argument("--workers", type=int, default=1)
    # Note: argparse's `type=bool` treats any non-empty string (including
    # "False") as True, so parse the value explicitly
    parser.add_argument(
        "--use-auto-rerank",
        type=lambda x: str(x).lower() in ("true", "1", "yes"),
        default=True,
    )

    return parser.parse_args()


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes

app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    cors_config={},
)


if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check that the models are loaded correctly and to avoid
    # first-call latency
    list(
        inference(
            InvokeRequest(
                text="Hello world.",
                reference_text=None,
                reference_audio=None,
                max_new_tokens=0,
                top_p=0.7,
                repetition_penalty=1.2,
                temperature=0.7,
                emotion=None,
                format="wav",
                ref_base=None,
                ref_json=None,
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    host, port = args.listen.split(":")

    uvicorn.run(app, host=host, port=int(port), workers=args.workers, log_level="info")
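
# Example launch (illustrative; the script path and checkpoint locations must
# match your checkout):
#
#   python tools/api.py --listen 127.0.0.1:8000 \
#       --llama-checkpoint-path checkpoints/fish-speech-1.2-sft \
#       --decoder-checkpoint-path checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth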