# Ollama_TTS_RVC / app.py
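"""Gradio chat front end for a DeepSeek LLM served locally through Ollama.

On startup the script installs Ollama if missing, launches the Ollama
server, pulls the model, and then exposes a simple text-in/text-out UI.
"""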
import os
import subprocess
import time

import gradio as gr
import ollama
# Ensure Ollama is installed
def install_ollama():
    try:
        subprocess.run(["ollama", "--version"], check=True)
        print("✅ Ollama is already installed.")
    except FileNotFoundError:
        print("🚀 Installing Ollama...")
        # shell=True needs a single command string; passing a list here would
        # silently drop the pipe to sh.
        subprocess.run("curl -fsSL https://ollama.com/install.sh | sh", shell=True, check=True)
        print("✅ Ollama installed successfully!")
# Start Ollama if it's not running
def start_ollama():
    try:
        subprocess.run(["pgrep", "-f", "ollama"], check=True)
        print("✅ Ollama is already running.")
    except subprocess.CalledProcessError:
        print("🚀 Starting Ollama server...")
        subprocess.Popen(["ollama", "serve"])
        # Give the server a moment to accept connections before pull/chat calls.
        time.sleep(5)
        print("✅ Ollama started.")
# Ensure the model is available. Ollama manages its own model store
# (~/.ollama/models by default), so availability is checked with
# `ollama list` rather than a local models/ path. The tag below assumes
# the official Ollama registry name for DeepSeek LLM 7B.
MODEL_NAME = "deepseek-llm:7b"
def download_model():
    result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
    if MODEL_NAME not in result.stdout:
        print(f"🚀 Downloading model: {MODEL_NAME} ...")
        # Pull the same name used for chat so the two always match.
        subprocess.run(["ollama", "pull", MODEL_NAME], check=True)
        print(f"✅ Model {MODEL_NAME} downloaded.")
    else:
        print(f"✅ Model {MODEL_NAME} already exists.")
# Generate AI response using Ollama
def chat_response(user_input):
    response = ollama.chat(model=MODEL_NAME, messages=[{"role": "user", "content": user_input}])
    return response['message']['content']
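# Optional variant: the ollama Python client also supports streaming
# (stream=True yields incremental chunks). A minimal sketch;
# chat_response_stream is a hypothetical helper, not used by the UI below.
def chat_response_stream(user_input):
    text = ""
    for chunk in ollama.chat(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": user_input}],
        stream=True,
    ):
        text += chunk['message']['content']
    return text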
# Run setup
install_ollama()
start_ollama()
download_model()
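# Quick smoke test (sketch): uncomment to verify the model responds before
# launching the UI.
# print(chat_response("Say hello in one sentence."))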
# Create Gradio Interface
iface = gr.Interface(
    fn=chat_response,
    inputs="text",
    outputs="text",
    title="DeepSeek ChatBot (Ollama)",
    description="Chat with DeepSeek LLM 7B using Ollama."
)
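# Alternative UI (sketch): gr.ChatInterface keeps multi-turn history in the
# widget. The history argument is required by its callback signature but is
# ignored here, since chat_response is single-turn.
# iface = gr.ChatInterface(
#     fn=lambda message, history: chat_response(message),
#     title="DeepSeek ChatBot (Ollama)",
# )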
# Launch Gradio App
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)