api.py

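"""HTTP TTS API server for Fish Speech.

Exposes /v1/tts (a msgpack-encoded ServeTTSRequest in, WAV/PCM/MP3 audio out,
optionally streamed) and /v1/health on a Kui ASGI app served by uvicorn. Text
is generated by a Llama model running in a worker thread and decoded to audio
by a Firefly VQ-GAN decoder.
"""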
import base64
import io
import json
import queue
import random
import sys
import traceback
import wave
from argparse import ArgumentParser
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Any, Literal, Optional

import numpy as np
import ormsgpack
import pyrootutils
import soundfile as sf
import torch
import torchaudio
from baize.datastructures import ContentType
from kui.asgi import (
    Body,
    FactoryClass,
    HTTPException,
    HttpRequest,
    HttpView,
    JSONResponse,
    Kui,
    OpenAPI,
    StreamResponse,
)
from kui.asgi.routing import MultimethodRoutes
from loguru import logger
from pydantic import BaseModel, Field, conint

pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# from fish_speech.models.vqgan.lit_module import VQGAN
from fish_speech.models.vqgan.modules.firefly import FireflyArchitecture
from fish_speech.text.chn_text_norm.text import Text as ChnNormedText
from fish_speech.utils import autocast_exclude_mps
from tools.auto_rerank import batch_asr, calculate_wer, is_chinese, load_model
from tools.file import AUDIO_EXTENSIONS, audio_to_bytes, list_files, read_ref_text
from tools.llama.generate import (
    GenerateRequest,
    GenerateResponse,
    WrappedGenerateResponse,
    launch_thread_safe_queue,
)
from tools.vqgan.inference import load_model as load_decoder_model
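

# Builds the RIFF/WAVE header bytes for an empty file (the wave module writes
# the header when the with-block closes). Sent once at the start of a streaming
# response so clients can begin decoding the raw PCM chunks that follow.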
def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes


# Define utils for web server
async def http_exception_handler(exc: HTTPException):
    return JSONResponse(
        dict(
            statusCode=exc.status_code,
            message=exc.content,
            error=HTTPStatus(exc.status_code).phrase,
        ),
        exc.status_code,
        exc.headers,
    )


async def other_exception_handler(exc: Exception):
    traceback.print_exc()

    status = HTTPStatus.INTERNAL_SERVER_ERROR
    return JSONResponse(
        dict(statusCode=status, message=str(exc), error=status.phrase),
        status,
    )
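

# Accepts either a filesystem path or raw audio bytes; downmixes multi-channel
# input to mono and resamples to the target rate, returning a float numpy array.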
def load_audio(reference_audio, sr):
    if len(reference_audio) > 255 or not Path(reference_audio).exists():
        audio_data = reference_audio
        reference_audio = io.BytesIO(audio_data)

    waveform, original_sr = torchaudio.load(
        reference_audio, backend="sox" if sys.platform == "linux" else "soundfile"
    )

    if waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    if original_sr != sr:
        resampler = torchaudio.transforms.Resample(orig_freq=original_sr, new_freq=sr)
        waveform = resampler(waveform)

    audio = waveform.squeeze().numpy()
    return audio
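

# Encodes a reference clip into VQ prompt tokens that condition the language
# model (in-context learning); returns None when no reference is provided.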
def encode_reference(*, decoder_model, reference_audio, enable_reference_audio):
    if enable_reference_audio and reference_audio is not None:
        # Load audios, and prepare basic info here
        reference_audio_content = load_audio(
            reference_audio, decoder_model.spec_transform.sample_rate
        )

        audios = torch.from_numpy(reference_audio_content).to(decoder_model.device)[
            None, None, :
        ]
        audio_lengths = torch.tensor(
            [audios.shape[2]], device=decoder_model.device, dtype=torch.long
        )
        logger.info(
            f"Loaded audio with {audios.shape[2] / decoder_model.spec_transform.sample_rate:.2f} seconds"
        )

        # VQ Encoder
        if isinstance(decoder_model, FireflyArchitecture):
            prompt_tokens = decoder_model.encode(audios, audio_lengths)[0][0]

        logger.info(f"Encoded prompt: {prompt_tokens.shape}")
    else:
        prompt_tokens = None
        logger.info("No reference audio provided")

    return prompt_tokens
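

# Decodes a sequence of VQ codes back into a waveform with the Firefly VQ-GAN.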
def decode_vq_tokens(
    *,
    decoder_model,
    codes,
):
    feature_lengths = torch.tensor([codes.shape[1]], device=decoder_model.device)
    logger.info(f"VQ features: {codes.shape}")

    if isinstance(decoder_model, FireflyArchitecture):
        # VQGAN Inference
        return decoder_model.decode(
            indices=codes[None],
            feature_lengths=feature_lengths,
        ).squeeze()

    raise ValueError(f"Unknown model type: {type(decoder_model)}")


routes = MultimethodRoutes(base_class=HttpView)


class ServeReferenceAudio(BaseModel):
    audio: bytes
    text: str


class ServeTTSRequest(BaseModel):
    text: str = "你说的对, 但是原神是一款由米哈游自主研发的开放世界手游."
    chunk_length: Annotated[int, conint(ge=100, le=300, strict=True)] = 200
    # Audio format
    format: Literal["wav", "pcm", "mp3"] = "wav"
    mp3_bitrate: Literal[64, 128, 192] = 128
    # Reference audios for in-context learning
    references: list[ServeReferenceAudio] = []
    # Reference id
    # For example, if you want to use https://fish.audio/m/7f92f8afb8ec43bf81429cc1c9199cb1/
    # just pass 7f92f8afb8ec43bf81429cc1c9199cb1
    reference_id: str | None = None
    # Normalize text for en & zh, which increases stability for numbers
    normalize: bool = True
    opus_bitrate: Optional[int] = -1000
    # Balanced mode will reduce latency to 300ms, but may decrease stability
    latency: Literal["normal", "balanced"] = "normal"
    # Not usually used below
    streaming: bool = False
    emotion: Optional[str] = None
    max_new_tokens: int = 1024
    top_p: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7
    repetition_penalty: Annotated[float, Field(ge=0.9, le=2.0, strict=True)] = 1.2
    temperature: Annotated[float, Field(ge=0.1, le=1.0, strict=True)] = 0.7


def get_content_type(audio_format):
    if audio_format == "wav":
        return "audio/wav"
    elif audio_format == "flac":
        return "audio/flac"
    elif audio_format == "mp3":
        return "audio/mpeg"
    else:
        return "application/octet-stream"
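

# Generator that drives one full TTS pass: collect VQ prompt tokens from the
# reference audio, enqueue a GenerateRequest for the Llama worker thread, then
# decode each returned segment with the VQ-GAN. In streaming mode it yields a
# WAV header followed by raw int16 PCM chunks; otherwise it yields a single
# concatenated float array at the end.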
@torch.inference_mode()
def inference(req: ServeTTSRequest):
    idstr: str | None = req.reference_id
    if idstr is not None:
        ref_folder = Path("references") / idstr
        ref_folder.mkdir(parents=True, exist_ok=True)
        ref_audios = list_files(
            ref_folder, AUDIO_EXTENSIONS, recursive=True, sort=False
        )
        prompt_tokens = [
            encode_reference(
                decoder_model=decoder_model,
                reference_audio=audio_to_bytes(str(ref_audio)),
                enable_reference_audio=True,
            )
            for ref_audio in ref_audios
        ]
        prompt_texts = [
            read_ref_text(str(ref_audio.with_suffix(".lab")))
            for ref_audio in ref_audios
        ]
    else:
        # Parse reference audio aka prompt
        refs = req.references
        if refs is None:
            refs = []
        prompt_tokens = [
            encode_reference(
                decoder_model=decoder_model,
                reference_audio=ref.audio,
                enable_reference_audio=True,
            )
            for ref in refs
        ]
        prompt_texts = [ref.text for ref in refs]

    # LLAMA Inference
    request = dict(
        device=decoder_model.device,
        max_new_tokens=req.max_new_tokens,
        text=(
            req.text
            if not req.normalize
            else ChnNormedText(raw_text=req.text).normalize()
        ),
        top_p=req.top_p,
        repetition_penalty=req.repetition_penalty,
        temperature=req.temperature,
        compile=args.compile,
        iterative_prompt=req.chunk_length > 0,
        chunk_length=req.chunk_length,
        max_length=2048,
        prompt_tokens=prompt_tokens,
        prompt_text=prompt_texts,
    )

    response_queue = queue.Queue()
    llama_queue.put(
        GenerateRequest(
            request=request,
            response_queue=response_queue,
        )
    )

    if req.streaming:
        yield wav_chunk_header()

    segments = []

    while True:
        result: WrappedGenerateResponse = response_queue.get()
        if result.status == "error":
            raise result.response

        result: GenerateResponse = result.response
        if result.action == "next":
            break

        with autocast_exclude_mps(
            device_type=decoder_model.device.type, dtype=args.precision
        ):
            fake_audios = decode_vq_tokens(
                decoder_model=decoder_model,
                codes=result.codes,
            )

        fake_audios = fake_audios.float().cpu().numpy()

        if req.streaming:
            yield (fake_audios * 32768).astype(np.int16).tobytes()
        else:
            segments.append(fake_audios)

    if req.streaming:
        return

    if len(segments) == 0:
        raise HTTPException(
            HTTPStatus.INTERNAL_SERVER_ERROR,
            content="No audio generated, please check the input text.",
        )

    fake_audios = np.concatenate(segments, axis=0)
    yield fake_audios


async def inference_async(req: ServeTTSRequest):
    for chunk in inference(req):
        yield chunk


async def buffer_to_async_generator(buffer):
    yield buffer


@routes.http.post("/v1/tts")
async def api_invoke_model(
    req: Annotated[ServeTTSRequest, Body(exclusive=True)],
):
    """
    Invoke model and generate audio
    """

    if args.max_text_length > 0 and len(req.text) > args.max_text_length:
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content=f"Text is too long, max length is {args.max_text_length}",
        )

    if req.streaming and req.format != "wav":
        raise HTTPException(
            HTTPStatus.BAD_REQUEST,
            content="Streaming only supports WAV format",
        )

    if req.streaming:
        return StreamResponse(
            iterable=inference_async(req),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )
    else:
        fake_audios = next(inference(req))
        buffer = io.BytesIO()
        sf.write(
            buffer,
            fake_audios,
            decoder_model.spec_transform.sample_rate,
            format=req.format,
        )

        return StreamResponse(
            iterable=buffer_to_async_generator(buffer.getvalue()),
            headers={
                "Content-Disposition": f"attachment; filename=audio.{req.format}",
            },
            content_type=get_content_type(req.format),
        )


@routes.http.post("/v1/health")
async def api_health():
    """
    Health check
    """

    return JSONResponse({"status": "ok"})


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--llama-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft",
    )
    parser.add_argument(
        "--decoder-checkpoint-path",
        type=str,
        default="checkpoints/fish-speech-1.2-sft/firefly-gan-vq-fsq-4x1024-42hz-generator.pth",
    )
    parser.add_argument("--decoder-config-name", type=str, default="firefly_gan_vq")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--half", action="store_true")
    parser.add_argument("--compile", action="store_true")
    parser.add_argument("--max-text-length", type=int, default=0)
    parser.add_argument("--listen", type=str, default="127.0.0.1:8080")
    parser.add_argument("--workers", type=int, default=1)
    # type=bool would treat any non-empty string (including "false") as True,
    # so parse the flag value explicitly.
    parser.add_argument(
        "--use-auto-rerank",
        type=lambda x: str(x).lower() == "true",
        default=True,
    )

    return parser.parse_args()


# Define Kui app
openapi = OpenAPI(
    {
        "title": "Fish Speech API",
    },
).routes


class MsgPackRequest(HttpRequest):
    async def data(self) -> Annotated[Any, ContentType("application/msgpack")]:
        if self.content_type == "application/msgpack":
            return ormsgpack.unpackb(await self.body)

        raise HTTPException(
            HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
            headers={"Accept": "application/msgpack"},
        )


app = Kui(
    routes=routes + openapi[1:],  # Remove the default route
    exception_handlers={
        HTTPException: http_exception_handler,
        Exception: other_exception_handler,
    },
    factory_class=FactoryClass(http=MsgPackRequest),
    cors_config={},
)
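
# Example client (a minimal sketch, not part of this module): the server expects
# a msgpack-encoded ServeTTSRequest body. Something like the following should
# work with httpx and ormsgpack installed:
#
#     import httpx
#     import ormsgpack
#
#     payload = ormsgpack.packb({"text": "Hello world.", "format": "wav"})
#     resp = httpx.post(
#         "http://127.0.0.1:8080/v1/tts",
#         content=payload,
#         headers={"Content-Type": "application/msgpack"},
#     )
#     with open("audio.wav", "wb") as f:
#         f.write(resp.content)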

if __name__ == "__main__":
    import uvicorn

    args = parse_args()
    args.precision = torch.half if args.half else torch.bfloat16

    logger.info("Loading Llama model...")
    llama_queue = launch_thread_safe_queue(
        checkpoint_path=args.llama_checkpoint_path,
        device=args.device,
        precision=args.precision,
        compile=args.compile,
    )
    logger.info("Llama model loaded, loading VQ-GAN model...")

    decoder_model = load_decoder_model(
        config_name=args.decoder_config_name,
        checkpoint_path=args.decoder_checkpoint_path,
        device=args.device,
    )

    logger.info("VQ-GAN model loaded, warming up...")

    # Dry run to check if the model is loaded correctly and avoid the first-time latency
    list(
        inference(
            ServeTTSRequest(
                text="Hello world.",
                references=[],
                reference_id=None,
                max_new_tokens=0,
                top_p=0.7,
                repetition_penalty=1.2,
                temperature=0.7,
                emotion=None,
                format="wav",
            )
        )
    )

    logger.info(f"Warming up done, starting server at http://{args.listen}")
    host, port = args.listen.split(":")

    # Note: uvicorn ignores `workers` when given an app instance rather than an
    # import string, so this effectively runs a single worker.
    uvicorn.run(app, host=host, port=int(port), workers=args.workers, log_level="info")