import os
from llama_cpp import Llama
import gradio as gr

# 🔹 Download the model if it is not available locally
MODEL_NAME = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
MODEL_FILE = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"
MODEL_PATH = f"./{MODEL_FILE}"

if not os.path.exists(MODEL_PATH):
    from huggingface_hub import hf_hub_download
    # hf_hub_download stores the file in the local Hugging Face cache
    # and returns its path, so MODEL_PATH is updated accordingly.
    MODEL_PATH = hf_hub_download(repo_id=MODEL_NAME, filename=MODEL_FILE)

# 🔹 Load the model into RAM (CPU inference)
llm = Llama(model_path=MODEL_PATH, n_ctx=1024, n_threads=8, verbose=False)
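# Optional sketch: if llama-cpp-python was built with GPU support (CUDA/Metal),
# layers can be offloaded with the n_gpu_layers parameter; -1 offloads all of them.
# Assumes a GPU-enabled build of the library:
# llm = Llama(model_path=MODEL_PATH, n_ctx=1024, n_gpu_layers=-1, verbose=False)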

def chat(user_input):
    """🗣️ Answer a user query with the local model."""
    # Mistral-Instruct models expect the [INST] ... [/INST] prompt format.
    prompt = f"[INST] {user_input} [/INST]"
    response = llm(prompt, max_tokens=256, temperature=0.7)
    return response["choices"][0]["text"].strip()
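# Alternative sketch: llama-cpp-python also exposes a chat-style API that applies
# the model's chat template for you (assumes a recent library version and a GGUF
# file that carries a chat template):
# def chat(user_input):
#     result = llm.create_chat_completion(
#         messages=[{"role": "user", "content": user_input}],
#         max_tokens=256,
#         temperature=0.7,
#     )
#     return result["choices"][0]["message"]["content"]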

# 🔹 Build a simple UI
interface = gr.Interface(fn=chat, inputs="text", outputs="text", title="🤖 Czech AI for free!")

# 🔹 Start the server
interface.launch()
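# Running this script starts a local Gradio server (by default at
# http://127.0.0.1:7860). For a quick smoke test without the UI, the launch()
# call above could be replaced with a direct call, e.g.:
# print(chat("Write one sentence about Prague."))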