import spaces
import torch
import gradio as gr
from huggingface_hub import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
model = None
tokenizer = None
model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""
snapshot_download("nazimali/Mistral-Nemo-Kurdish")
snapshot_download(repo_id=model_id, ignore_patterns=["*.gguf"])
@spaces.GPU
def respond(
    message,
    history: list[dict],
):
    global model, tokenizer

    # Lazily load the model on the first request, quantized to 4-bit NF4 with
    # bfloat16 compute so it fits comfortably in GPU memory.
    if model is None:
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            quantization_config=bnb_config,
            device_map="auto",
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model.eval()

    # The system instruction is Kurmanji for "you are a helpful assistant".
    prompt = infer_prompt.format("tu arîkarek alîkar î", message)
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    decoded_output = tokenizer.batch_decode(generated_ids)[0]
    # Drop the echoed prompt and the EOS marker so only the reply is shown.
    return decoded_output.replace(prompt, "").replace("</s>", "")
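

# Illustration only, not wired into the app: the `history` argument above is
# accepted but unused, so every turn is answered without context. A minimal
# sketch (hypothetical helper, assuming `type="messages"` history entries
# shaped like {"role": ..., "content": ...}) of folding past turns into the
# template's input slot:
def build_prompt_with_history(message: str, history: list[dict]) -> str:
    past_turns = "\n".join(f"{m['role']}: {m['content']}" for m in history)
    return infer_prompt.format("tu arîkarek alîkar î", f"{past_turns}\n{message}".strip())
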
demo = gr.ChatInterface(
    respond,
    type="messages",
    examples=["سڵاو ئەلیکوم، چۆنیت؟", "Selam alikum, tu çawa yî?", "Peace be upon you, how are you?"],
    title="Mistral Nemo Kurdish Instruct",
)
if __name__ == "__main__":
demo.launch()
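
# Note: @spaces.GPU targets Hugging Face ZeroGPU Spaces, which allocate a GPU
# per call; running this file elsewhere requires a CUDA device, since the
# tokenized inputs are moved to "cuda" explicitly.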