# api_client.py — command-line client for a Fish Speech TTS server.
  1. import argparse
  2. import base64
  3. import time
  4. import wave
  5. import ormsgpack
  6. import pyaudio
  7. import requests
  8. from pydub import AudioSegment
  9. from pydub.playback import play
  10. from fish_speech.utils.file import audio_to_bytes, read_ref_text
  11. from fish_speech.utils.schema import ServeReferenceAudio, ServeTTSRequest
  12. def parse_args():
  13. parser = argparse.ArgumentParser(
  14. description="Send text to a Fish Speech TTS server and receive synthesized audio.",
  15. epilog=(
  16. "Model selection note:\n"
  17. " The base TTS model is selected by the server you call. For example, if the\n"
  18. " server was started with checkpoints/s2-pro, this client will use S2-Pro\n"
  19. " automatically. There is no separate per-request --model flag.\n\n"
  20. "Examples:\n"
  21. ' python tools/api_client.py -u http://127.0.0.1:8080/v1/tts -t "Hello from Fish Speech"\n'
  22. ' python tools/api_client.py -u http://127.0.0.1:8080/v1/tts -t "Hello" --reference_id my-speaker'
  23. ),
  24. formatter_class=argparse.RawTextHelpFormatter,
  25. )
  26. parser.add_argument(
  27. "--url",
  28. "-u",
  29. type=str,
  30. default="http://127.0.0.1:8080/v1/tts",
  31. help="URL of the TTS server. The server decides which base model is loaded.",
  32. )
  33. parser.add_argument(
  34. "--text", "-t", type=str, required=True, help="Text to be synthesized"
  35. )
  36. parser.add_argument(
  37. "--reference_id",
  38. "-id",
  39. type=str,
  40. default=None,
  41. help="ID of the reference voice to use for synthesis\n(Local: name of folder containing audios and files)",
  42. )
  43. parser.add_argument(
  44. "--reference_audio",
  45. "-ra",
  46. type=str,
  47. nargs="+",
  48. default=None,
  49. help="Path to the audio file",
  50. )
  51. parser.add_argument(
  52. "--reference_text",
  53. "-rt",
  54. type=str,
  55. nargs="+",
  56. default=None,
  57. help="Reference text for voice synthesis",
  58. )
  59. parser.add_argument(
  60. "--output",
  61. "-o",
  62. type=str,
  63. default="generated_audio",
  64. help="Output audio file name",
  65. )
  66. parser.add_argument(
  67. "--play",
  68. action=argparse.BooleanOptionalAction,
  69. default=True,
  70. help="Whether to play audio after receiving data",
  71. )
  72. parser.add_argument(
  73. "--format", type=str, choices=["wav", "pcm", "mp3", "opus"], default="wav"
  74. )
  75. parser.add_argument(
  76. "--latency",
  77. type=str,
  78. default="normal",
  79. choices=["normal", "balanced"],
  80. help="Used in api.fish.audio/v1/tts",
  81. )
  82. parser.add_argument(
  83. "--max_new_tokens",
  84. type=int,
  85. default=1024,
  86. help="Maximum new tokens to generate. \n0 means no limit.",
  87. )
  88. parser.add_argument(
  89. "--chunk_length", type=int, default=300, help="Chunk length for synthesis"
  90. )
  91. parser.add_argument(
  92. "--top_p", type=float, default=0.8, help="Top-p sampling for synthesis"
  93. )
  94. parser.add_argument(
  95. "--repetition_penalty",
  96. type=float,
  97. default=1.1,
  98. help="Repetition penalty for synthesis",
  99. )
  100. parser.add_argument(
  101. "--temperature", type=float, default=0.8, help="Temperature for sampling"
  102. )
  103. # parser.add_argument("--streaming", type=bool, default=False, help="Enable streaming response")
  104. parser.add_argument(
  105. "--streaming", action="store_true", help="Enable streaming response"
  106. )
  107. parser.add_argument(
  108. "--channels", type=int, default=1, help="Number of audio channels"
  109. )
  110. parser.add_argument("--rate", type=int, default=44100, help="Sample rate for audio")
  111. parser.add_argument(
  112. "--use_memory_cache",
  113. type=str,
  114. default="off",
  115. choices=["on", "off"],
  116. help="Cache encoded references codes in memory.\n",
  117. )
  118. parser.add_argument(
  119. "--seed",
  120. type=int,
  121. default=None,
  122. help="`None` means randomized inference, otherwise deterministic.\nIt can't be used for fixing a timbre.",
  123. )
  124. parser.add_argument(
  125. "--api_key",
  126. type=str,
  127. default="YOUR_API_KEY",
  128. help="API key for authentication",
  129. )
  130. return parser.parse_args()
  131. if __name__ == "__main__":
  132. args = parse_args()
  133. idstr: str | None = args.reference_id
  134. # priority: ref_id > [{text, audio},...]
  135. if idstr is None:
  136. ref_audios = args.reference_audio
  137. ref_texts = args.reference_text
  138. if ref_audios is None:
  139. byte_audios = []
  140. else:
  141. byte_audios = [audio_to_bytes(ref_audio) for ref_audio in ref_audios]
  142. if ref_texts is None:
  143. ref_texts = []
  144. else:
  145. ref_texts = [read_ref_text(ref_text) for ref_text in ref_texts]
  146. else:
  147. byte_audios = []
  148. ref_texts = []
  149. pass # in api.py
  150. data = {
  151. "text": args.text,
  152. "references": [
  153. ServeReferenceAudio(
  154. audio=ref_audio if ref_audio is not None else b"", text=ref_text
  155. )
  156. for ref_text, ref_audio in zip(ref_texts, byte_audios)
  157. ],
  158. "reference_id": idstr,
  159. "format": args.format,
  160. "latency": args.latency,
  161. "max_new_tokens": args.max_new_tokens,
  162. "chunk_length": args.chunk_length,
  163. "top_p": args.top_p,
  164. "repetition_penalty": args.repetition_penalty,
  165. "temperature": args.temperature,
  166. "streaming": args.streaming,
  167. "use_memory_cache": args.use_memory_cache,
  168. "seed": args.seed,
  169. }
  170. pydantic_data = ServeTTSRequest(**data)
  171. print("Sending request")
  172. start_time = time.time()
  173. response = requests.post(
  174. args.url,
  175. params={"format": "msgpack"},
  176. data=ormsgpack.packb(pydantic_data, option=ormsgpack.OPT_SERIALIZE_PYDANTIC),
  177. stream=args.streaming,
  178. headers={
  179. "authorization": f"Bearer {args.api_key}",
  180. "content-type": "application/msgpack",
  181. },
  182. )
  183. end_time = time.time()
  184. print(f"Request took {end_time - start_time} seconds")
  185. if response.status_code == 200:
  186. if args.streaming:
  187. p = pyaudio.PyAudio()
  188. audio_format = pyaudio.paInt16 # Assuming 16-bit PCM format
  189. stream = p.open(
  190. format=audio_format, channels=args.channels, rate=args.rate, output=True
  191. )
  192. wf = wave.open(f"{args.output}.wav", "wb")
  193. wf.setnchannels(args.channels)
  194. wf.setsampwidth(p.get_sample_size(audio_format))
  195. wf.setframerate(args.rate)
  196. stream_stopped_flag = False
  197. try:
  198. for chunk in response.iter_content(chunk_size=1024):
  199. if chunk:
  200. stream.write(chunk)
  201. wf.writeframesraw(chunk)
  202. else:
  203. if not stream_stopped_flag:
  204. stream.stop_stream()
  205. stream_stopped_flag = True
  206. finally:
  207. stream.close()
  208. p.terminate()
  209. wf.close()
  210. else:
  211. audio_content = response.content
  212. audio_path = f"{args.output}.{args.format}"
  213. with open(audio_path, "wb") as audio_file:
  214. audio_file.write(audio_content)
  215. audio = AudioSegment.from_file(audio_path, format=args.format)
  216. if args.play:
  217. play(audio)
  218. print(f"Audio has been saved to '{audio_path}'.")
  219. else:
  220. print(f"Request failed with status code {response.status_code}")
  221. print(response.json())