MoiMoi-01 committed on
Commit
50e3e8e
·
verified ·
1 Parent(s): 44e69f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -28
app.py CHANGED
@@ -3,15 +3,13 @@ from huggingface_hub import InferenceClient
3
  import torch
4
  from TTS.api import TTS
5
  import soundfile as sf
6
- import os
7
- os.system("pip install fairseq==0.12.2")
8
 
9
-
10
- # Load TTS Model (supports multiple models)
11
  tts_model = TTS("tts_models/en/ljspeech/tacotron2-DDC").to("cuda" if torch.cuda.is_available() else "cpu")
12
 
13
- # Hugging Face LLM client
14
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
15
 
16
  def respond(
17
  message,
@@ -50,26 +48,24 @@ def respond(
50
 
51
  yield response, output_audio_path # Yielding audio response
52
 
53
- # Gradio Chat Interface with Audio Output
54
- demo = gr.ChatInterface(
55
- respond,
56
- additional_inputs=[
57
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
58
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
59
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
60
- gr.Slider(
61
- minimum=0.1,
62
- maximum=1.0,
63
- value=0.95,
64
- step=0.05,
65
- label="Top-p (nucleus sampling)",
66
- ),
67
- ],
68
- outputs=[
69
- gr.Textbox(label="Generated Response"),
70
- gr.Audio(type="filepath", label="TTS Output"),
71
- ],
72
- )
73
 
74
- if __name__ == "__main__":
75
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import torch
4
  from TTS.api import TTS
5
  import soundfile as sf
 
 
6
 
7
# --- Model setup -------------------------------------------------------------
# TTS: Tacotron2-DDC trained on LJSpeech; run on GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
tts_model = TTS("tts_models/en/ljspeech/tacotron2-DDC").to(device)

# LLM backend, served through the Hugging Face Inference API.
# NOTE(review): the original id "deepseek-ai/deepseek-r1-7b" is not a published
# Hub repository; the 7B-class DeepSeek-R1 release is the Qwen distillation
# below — confirm this is the intended checkpoint.
client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
12
+
13
 
14
  def respond(
15
  message,
 
48
 
49
  yield response, output_audio_path # Yielding audio response
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
# Gradio UI built with Blocks (instead of ChatInterface) so the TTS audio
# player can be laid out alongside the chat transcript.
with gr.Blocks() as demo:
    gr.Markdown("## DeepSeek R1 7B Chatbot with TTS")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="User Input")

    # Generation controls.
    system_msg = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max Tokens")
    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)")

    tts_audio = gr.Audio(type="filepath", label="TTS Output")

    def chat_fn(message, history, system_message, max_new_tokens, temp, nucleus_p):
        # `respond` is a generator (it yields (text, audio_path) updates), so
        # forward each update instead of returning the generator object.
        yield from respond(message, history, system_message, max_new_tokens, temp, nucleus_p)

    # Pass the control components through `inputs=[...]`: reading `.value`
    # inside the callback (as the original did) only ever sees each widget's
    # initial value, never what the user set it to.
    msg.submit(
        chat_fn,
        inputs=[msg, chatbot, system_msg, max_tokens, temperature, top_p],
        outputs=[chatbot, tts_audio],
    )

if __name__ == "__main__":
    demo.launch()