MoiMoi-01 committed
Commit f7d5472 · verified · 1 Parent(s): 4c352f8

Update app.py

Files changed (1):
  1. app.py +58 -79

app.py CHANGED
@@ -1,81 +1,60 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-import torch
-from TTS.api import TTS
 import os
 import subprocess
-
-# Force CPU usage
-device = "cpu"
-
-# Load TTS Model (Running on CPU)
-tts_model = TTS("tts_models/en/ljspeech/tacotron2-DDC", gpu=False)  # ✅ Ensures CPU-only execution
-
-# Hugging Face LLM Client (DeepSeek R1 7B)
-client = InferenceClient("deepseek-ai/deepseek-r1-7b")
-
-# RVC Model Paths
-RVC_MODEL_PATH = "zeldabotw.pth"
-RVC_INDEX_PATH = "zeldabotw.index"
-
-# Function to call RVC for voice conversion (CPU Mode)
-def convert_voice(input_wav, output_wav):
-    """Converts input TTS audio to ZeldaBotW voice using RVC (CPU Mode)."""
-    if not os.path.exists(RVC_MODEL_PATH) or not os.path.exists(RVC_INDEX_PATH):
-        raise FileNotFoundError("RVC model files not found! Ensure zeldabotw.pth and zeldabotw.index are in the same directory.")
-
-    command = f"python infer_rvc.py --input {input_wav} --output {output_wav} --model {RVC_MODEL_PATH} --index {RVC_INDEX_PATH} --pitch_shift 0 --device cpu"
-
-    process = subprocess.run(command, shell=True, capture_output=True, text=True)
-    if process.returncode != 0:
-        print("RVC conversion failed:", process.stderr)
-        return None
-    return output_wav
-
-# Chatbot Response + TTS + RVC
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]: messages.append({"role": "user", "content": val[0]})
-        if val[1]: messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    # Get LLM Response
-    for message in client.chat_completion(messages, max_tokens=max_tokens, stream=False, temperature=temperature, top_p=top_p):
-        response += message.choices[0].message.content
-
-    # Generate Speech from Text (CPU Mode)
-    tts_audio_path = "tts_output.wav"
-    tts_model.tts_to_file(text=response, file_path=tts_audio_path)
-
-    # Convert TTS output to ZeldaBotW voice (CPU Mode)
-    rvc_audio_path = "rvc_output.wav"
-    rvc_converted_path = convert_voice(tts_audio_path, rvc_audio_path)
-
-    return response, tts_audio_path, rvc_converted_path  # ✅ Now correctly returns all outputs
-
-# Gradio UI
-with gr.Blocks() as demo:
-    gr.Markdown("## DeepSeek R1 7B Chatbot with ZeldaBotW Voice (CPU Mode)")
-
-    chatbot = gr.Chatbot(type="messages")  # ✅ Fix deprecated type warning
-    msg = gr.Textbox(label="User Input")
-
-    system_msg = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
-    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max Tokens")
-    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)")
-
-    tts_audio = gr.Audio(type="filepath", label="TTS Output")
-    rvc_audio = gr.Audio(type="filepath", label="RVC ZeldaBotW Voice")
-
-    def chat_fn(message, history):
-        return respond(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value)
-
-    msg.submit(chat_fn, inputs=[msg, chatbot], outputs=[chatbot, tts_audio, rvc_audio])
-
-demo.launch()

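A reviewer's aside on the removed pane above: `client.chat_completion(..., stream=False)` in `huggingface_hub` returns a single `ChatCompletionOutput` rather than an iterable of chunks, so the accumulation loop in `respond()` would not behave as written. The non-streaming read is a one-liner; a sketch against that API:

```python
# Sketch: with stream=False there is exactly one response object, no loop.
output = client.chat_completion(
    messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p
)
response = output.choices[0].message.content
```
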
 import os
 import subprocess
+import gradio as gr
+import ollama
+
+# Ensure Ollama is installed
+def install_ollama():
+    try:
+        subprocess.run(["ollama", "--version"], check=True)
+        print("✅ Ollama is already installed.")
+    except FileNotFoundError:
+        print("🚀 Installing Ollama...")
+        subprocess.run(["curl", "-fsSL", "https://ollama.com/install.sh", "|", "sh"], shell=True, check=True)
+        print("✅ Ollama installed successfully!")
+
+# Start Ollama if it's not running
+def start_ollama():
+    try:
+        subprocess.run(["pgrep", "-f", "ollama"], check=True)
+        print("✅ Ollama is already running.")
+    except subprocess.CalledProcessError:
+        print("🚀 Starting Ollama server...")
+        subprocess.Popen(["ollama", "serve"])
+        print("✅ Ollama started.")
+
+# Ensure model is downloaded to models/ folder
+MODEL_NAME = "deepseek-llm-7b"
+MODEL_PATH = f"models/{MODEL_NAME}"
+
+def download_model():
+    if not os.path.exists(MODEL_PATH):
+        print(f"🚀 Downloading model: {MODEL_NAME} to {MODEL_PATH} ...")
+        os.makedirs("models", exist_ok=True)
+        subprocess.run(["ollama", "pull", f"deepseek/{MODEL_NAME}"], check=True)
+        print(f"✅ Model downloaded to {MODEL_PATH}.")
+    else:
+        print(f"✅ Model {MODEL_NAME} already exists.")
+
+# Generate AI response using Ollama
+def chat_response(user_input):
+    response = ollama.chat(model=MODEL_NAME, messages=[{"role": "user", "content": user_input}])
+    return response['message']['content']
+
+# Run setup
+install_ollama()
+start_ollama()
+download_model()
+
+# Create Gradio Interface
+iface = gr.Interface(
+    fn=chat_response,
+    inputs="text",
+    outputs="text",
+    title="DeepSeek ChatBot (Ollama)",
+    description="Chat with DeepSeek LLM 7B using Ollama."
+)
+
+# Launch Gradio App
+if __name__ == "__main__":
+    iface.launch(server_name="0.0.0.0", server_port=7860)
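
Three notes on the new version, each with a small sketch rather than a patch to the commit itself.

First, `install_ollama()` mixes a list argv with `shell=True`: in that mode only `"curl"` is handed to the shell, and the `"|"` and `"sh"` elements become stray arguments, so the install script never actually runs. Passing the pipeline as one string lets the shell see the pipe:

```python
import subprocess

def install_ollama():
    """Sketch: run the official install pipeline as a single shell string."""
    try:
        subprocess.run(["ollama", "--version"], check=True)
        print("✅ Ollama is already installed.")
    except FileNotFoundError:
        print("🚀 Installing Ollama...")
        # shell=True + one string: the shell interprets the | itself.
        subprocess.run("curl -fsSL https://ollama.com/install.sh | sh",
                       shell=True, check=True)
        print("✅ Ollama installed successfully!")
```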
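
Second, Ollama keeps pulled models in its own store (by default under `~/.ollama/models`), so the `os.path.exists("models/deepseek-llm-7b")` check can never succeed and the pull re-runs on every start; the tag being pulled (`deepseek/deepseek-llm-7b`) also differs from the `MODEL_NAME` later passed to `ollama.chat`. A sketch that keeps one canonical tag and asks the server whether it is installed (the exact tag, e.g. `deepseek-llm:7b`, is an assumption to verify against the Ollama library):

```python
import ollama

MODEL_NAME = "deepseek-llm:7b"  # assumed registry tag; verify before use

def ensure_model():
    """Sketch: probe the Ollama store instead of the local filesystem."""
    try:
        ollama.show(MODEL_NAME)  # raises ResponseError if not installed
        print(f"✅ Model {MODEL_NAME} already exists.")
    except ollama.ResponseError:
        print(f"🚀 Pulling {MODEL_NAME} ...")
        ollama.pull(MODEL_NAME)
        print(f"✅ Model {MODEL_NAME} downloaded.")
```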
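
Third, `start_ollama()` returns as soon as `ollama serve` is forked, so `download_model()` can race a server that is not yet accepting connections. A small readiness poll between the two closes the gap (the 30-second timeout is arbitrary):

```python
import time
import ollama

def wait_for_server(timeout: float = 30.0) -> bool:
    """Sketch: poll the Ollama API until it answers or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            ollama.list()  # any cheap endpoint works as a liveness probe
            return True
        except Exception:
            time.sleep(1)
    return False
```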