import os
from llama_cpp import Llama
import gradio as gr
from huggingface_hub import login, hf_hub_download
# 🔹 Read the API token securely from the Space "Secrets"
HF_TOKEN = os.getenv("HF_TOKEN")
# Log in to the Hugging Face Hub (skipped if no token is configured)
if HF_TOKEN:
    login(HF_TOKEN)
# 🔹 Download the model (GGUF quantization from the Hub)
MODEL_NAME = "TheBloke/Mistral-7B-Instruct-v0.2-code-ft-GGUF"
MODEL_FILE = "mistral-7b-instruct-v0.2-code-ft.Q4_K_S.gguf"
MODEL_PATH = f"./{MODEL_FILE}"
if not os.path.exists(MODEL_PATH):
    MODEL_PATH = hf_hub_download(repo_id=MODEL_NAME, filename=MODEL_FILE, token=HF_TOKEN)
# 🔹 Load the model into RAM (CPU inference via llama.cpp)
llm = Llama(model_path=MODEL_PATH, n_ctx=1024, n_threads=8, verbose=False)
def chat(user_input):
    """🗣️ Answer a user query with the model."""
    response = llm(user_input, max_tokens=256, temperature=0.7)
    return response["choices"][0]["text"]
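
# A possible streaming variant (just a sketch, not wired into the UI below):
# llama-cpp-python can yield partial completions with stream=True, and Gradio
# treats a generator function as a streaming output, so passing fn=chat_stream
# to gr.Interface instead of fn=chat would show the answer as it is generated.
def chat_stream(user_input):
    """🗣️ Same as chat(), but yields the answer incrementally."""
    text = ""
    for chunk in llm(user_input, max_tokens=256, temperature=0.7, stream=True):
        text += chunk["choices"][0]["text"]
        yield text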
# 🔹 Build a simple UI
interface = gr.Interface(fn=chat, inputs="text", outputs="text", title="🤖 Free Czech AI!")
# 🔹 Start the server
interface.launch()
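
# Note: the Space is assumed to list the packages implied by the imports above in
# requirements.txt (llama-cpp-python, gradio, huggingface_hub); exact versions are not pinned here.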