e2e_webui.py

import io
import re
import wave

import gradio as gr

from fish_speech.utils.schema import ServeMessage, ServeTextPart, ServeVQPart

from .fish_e2e import FishE2EAgent, FishE2EEventType


def wav_chunk_header(sample_rate=44100, bit_depth=16, channels=1):
    buffer = io.BytesIO()

    with wave.open(buffer, "wb") as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(bit_depth // 8)
        wav_file.setframerate(sample_rate)

    wav_header_bytes = buffer.getvalue()
    buffer.close()
    return wav_header_bytes
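
# A sketch of how the header above is consumed: every streamed speech segment
# is prefixed with a fresh 44-byte WAV header so the browser can decode each
# chunk independently, e.g.
#
#     playable_chunk = wav_chunk_header() + raw_pcm_bytes
#
# The size fields in the header describe an empty file (no frames are written
# before closing), which streaming decoders appear to tolerate.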


class ChatState:
    def __init__(self):
        self.conversation = []
        self.added_systext = False
        self.added_sysaudio = False

    def get_history(self):
        results = []
        for msg in self.conversation:
            results.append({"role": msg.role, "content": self.repr_message(msg)})

        # Process assistant messages to extract questions and update user messages
        for i, msg in enumerate(results):
            if msg["role"] == "assistant":
                match = re.search(r"Question: (.*?)\n\nResponse:", msg["content"])
                if match and i > 0 and results[i - 1]["role"] == "user":
                    # Update the previous user message with the extracted question
                    results[i - 1]["content"] += "\n" + match.group(1)
                    # Strip the Question/Response scaffolding from the assistant message
                    msg["content"] = msg["content"].split("\n\nResponse: ", 1)[1]
        return results

    def repr_message(self, msg: ServeMessage):
        response = ""
        for part in msg.parts:
            if isinstance(part, ServeTextPart):
                response += part.text
            elif isinstance(part, ServeVQPart):
                response += f"<audio {len(part.codes[0]) / 21:.2f}s>"
        return response
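
# Illustrative repr_message output (assuming, as the divisor above suggests,
# roughly 21 VQ code frames per second of audio):
#
#     "Sure, here you go. <audio 2.38s>"
#
# Text parts pass through verbatim; audio parts render as a duration tag.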


def clear_fn():
    return [], ChatState(), None, None, None
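
# clear_fn's five return values map onto the outputs of clear_button.click in
# create_demo(): (chatbot, state, audio_input, output_audio, text_input).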


async def process_audio_input(
    sys_audio_input, sys_text_input, audio_input, state: ChatState, text_input: str
):
    if audio_input is None and not text_input:
        raise gr.Error("No input provided")

    agent = FishE2EAgent()  # Create a new agent instance for each request

    # Convert the audio input to a (sample rate, numpy array) pair
    if isinstance(audio_input, tuple):
        sr, audio_data = audio_input
    elif text_input:
        sr = 44100
        audio_data = None
    else:
        raise gr.Error("Invalid audio format")

    # If a reference (system) audio is given, its sample rate takes precedence
    if isinstance(sys_audio_input, tuple):
        sr, sys_audio_data = sys_audio_input
    else:
        sr = 44100
        sys_audio_data = None

    def append_to_chat_ctx(
        part: ServeTextPart | ServeVQPart, role: str = "assistant"
    ) -> None:
        if not state.conversation or state.conversation[-1].role != role:
            state.conversation.append(ServeMessage(role=role, parts=[part]))
        else:
            state.conversation[-1].parts.append(part)

    if not state.added_systext and sys_text_input:
        state.added_systext = True
        append_to_chat_ctx(ServeTextPart(text=sys_text_input), role="system")

    if text_input:
        append_to_chat_ctx(ServeTextPart(text=text_input), role="user")
        audio_data = None  # A typed message overrides any recorded audio

    async for event in agent.stream(
        sys_audio_data,
        audio_data,
        sr,
        1,
        chat_ctx={
            "messages": state.conversation,
            "added_sysaudio": state.added_sysaudio,
        },
    ):
        if event.type == FishE2EEventType.USER_CODES:
            append_to_chat_ctx(ServeVQPart(codes=event.vq_codes), role="user")
        elif event.type == FishE2EEventType.SPEECH_SEGMENT:
            append_to_chat_ctx(ServeVQPart(codes=event.vq_codes))
            yield state.get_history(), wav_chunk_header() + event.frame.data, None, None
        elif event.type == FishE2EEventType.TEXT_SEGMENT:
            append_to_chat_ctx(ServeTextPart(text=event.text))
            yield state.get_history(), None, None, None

    yield state.get_history(), None, None, None
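
# Each tuple yielded above maps onto the four outputs wired to this handler in
# create_demo(): (chatbot history, streamed audio chunk, audio_input,
# text_input). The trailing None values reset the two input widgets.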


async def process_text_input(
    sys_audio_input, sys_text_input, state: ChatState, text_input: str
):
    async for event in process_audio_input(
        sys_audio_input, sys_text_input, None, state, text_input
    ):
        yield event


def create_demo():
    with gr.Blocks() as demo:
        state = gr.State(ChatState())

        with gr.Row():
            # Left column (70%) for chatbot and notes
            with gr.Column(scale=7):
                chatbot = gr.Chatbot(
                    [],
                    elem_id="chatbot",
                    bubble_full_width=False,
                    height=600,
                    type="messages",
                )

                notes = gr.Markdown(
                    """
# Fish Agent
1. This demo is Fish Audio's self-developed end-to-end language model, Fish Agent version 3B.
2. You can find the code and weights in our official repo on [GitHub](https://github.com/fishaudio/fish-speech) and [Hugging Face](https://huggingface.co/fishaudio/fish-agent-v0.1-3b), but the content is released under a CC BY-NC-SA 4.0 licence.
3. The demo is an early alpha test version; the inference speed still needs to be optimised.
# Features
1. The model integrates the ASR and TTS stages itself, with no need to plug in other models, i.e., it is truly end-to-end rather than three-stage (ASR+LLM+TTS).
2. The model can use a reference audio to control the speech timbre.
3. The model can generate speech with strong emotion and prosody.
"""
                )

            # Right column (30%) for controls
            with gr.Column(scale=3):
                sys_audio_input = gr.Audio(
                    sources=["upload"],
                    type="numpy",
                    label="Give a timbre for your assistant",
                )
                sys_text_input = gr.Textbox(
                    label="What is your assistant's role?",
                    # The 'Question: ... Response: ...' scaffolding below must
                    # match the regex ChatState.get_history uses to rebuild the
                    # displayed chat.
                    value="You are a voice assistant created by Fish Audio, offering end-to-end voice interaction for a seamless user experience. You are required to first transcribe the user's speech, then answer it in the following format: 'Question: [USER_SPEECH]\n\nResponse: [YOUR_RESPONSE]\n'. You are required to use the following voice in this conversation.",
                    type="text",
                )
                audio_input = gr.Audio(
                    sources=["microphone"], type="numpy", label="Speak your message"
                )
                text_input = gr.Textbox(label="Or type your message", type="text")
                output_audio = gr.Audio(
                    label="Assistant's Voice",
                    streaming=True,
                    autoplay=True,
                    interactive=False,
                )
                send_button = gr.Button("Send", variant="primary")
                clear_button = gr.Button("Clear")

        # Event handlers
        audio_input.stop_recording(
            process_audio_input,
            inputs=[sys_audio_input, sys_text_input, audio_input, state, text_input],
            outputs=[chatbot, output_audio, audio_input, text_input],
            show_progress=True,
        )

        send_button.click(
            process_text_input,
            inputs=[sys_audio_input, sys_text_input, state, text_input],
            outputs=[chatbot, output_audio, audio_input, text_input],
            show_progress=True,
        )

        text_input.submit(
            process_text_input,
            inputs=[sys_audio_input, sys_text_input, state, text_input],
            outputs=[chatbot, output_audio, audio_input, text_input],
            show_progress=True,
        )

        clear_button.click(
            clear_fn,
            inputs=[],
            outputs=[chatbot, state, audio_input, output_audio, text_input],
        )

    return demo


if __name__ == "__main__":
    demo = create_demo()
    demo.launch(server_name="127.0.0.1", server_port=7860, share=True)