# Ollama_TTS_RVC / app.py
# Source: Hugging Face Space by MoiMoi-01, commit 7ac8bf6 (verified), 1.46 kB
import os
import subprocess
import sys

import gradio as gr
import ollama
# Ensure ollama is installed via pip
def install_ollama():
    """Ensure the ``ollama`` Python package is importable, installing it if needed.

    NOTE(review): the module-level ``import ollama`` at the top of this file
    would already have raised ImportError before this function ever runs, so
    the fallback branch is effectively dead as written — confirm whether the
    top-level import should be moved/removed.
    """
    try:
        import ollama  # noqa: F401
        print("✅ Ollama is already installed.")
    except ImportError:
        print("🚀 Installing Ollama via pip...")
        # Use the current interpreter's pip (``python -m pip``) so the package
        # is installed into the environment actually running this script,
        # not whatever ``pip`` happens to be first on PATH.
        subprocess.run([sys.executable, "-m", "pip", "install", "ollama"], check=True)
        import ollama  # noqa: F401 — re-import after installation (binds locally only)
        print("✅ Ollama installed successfully!")
# Ensure model is downloaded
# Model identifier passed to the Ollama daemon (list / pull / chat).
MODEL_NAME = "deepseek-llm-7b"
# Derived local path; NOTE(review): appears unused in this file — verify
# against the rest of the Space before relying on it.
MODEL_PATH = f"models/{MODEL_NAME}"
def download_model():
    """Pull ``MODEL_NAME`` via the Ollama daemon unless it is already local.

    Propagates whatever ``ollama.list``/``ollama.pull`` raise if the daemon
    is unreachable.
    """
    models = ollama.list()
    # Ollama reports installed models with a tag suffix (e.g. "name:latest"),
    # so exact equality against MODEL_NAME would always miss; compare the
    # base name instead.
    installed = {m["name"].split(":")[0] for m in models["models"]}
    if MODEL_NAME in installed:
        print(f"✅ Model '{MODEL_NAME}' is already available.")
    else:
        print(f"🚀 Downloading model: {MODEL_NAME} ...")
        ollama.pull(MODEL_NAME)
        print(f"✅ Model '{MODEL_NAME}' downloaded successfully.")
# Generate AI response using Ollama
def chat_response(user_input):
    """Send *user_input* to the model as a single-turn chat and return the reply text."""
    messages = [{"role": "user", "content": user_input}]
    reply = ollama.chat(model=MODEL_NAME, messages=messages)
    return reply["message"]["content"]
# Run setup at import time: verify the client library, then fetch the model.
install_ollama()
download_model()
# Create Gradio Interface: one text box in, the model's reply text out.
iface = gr.Interface(
    fn=chat_response,
    inputs="text",
    outputs="text",
    title="DeepSeek ChatBot (Ollama)",
    description="Chat with DeepSeek LLM 7B using Ollama.",
)
# Launch Gradio App when executed as a script. Bind to 0.0.0.0 so the
# container's port is reachable from outside; 7860 is the Spaces default.
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)