from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login
import gradio as gr
from dotenv import load_dotenv
import os
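
# Read the Hugging Face access token from a local .env file.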
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
# whoami(token=HF_TOKEN)  # optional sanity check (requires importing whoami from huggingface_hub)
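
# Load the adapter config, the Gemma-2B base model, and the PEFT adapter fine-tuned for instruction following.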
config = PeftConfig.from_pretrained("pranjalpandey/gemma-open-instruct")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=HF_TOKEN)
model = PeftModel.from_pretrained(model, "pranjalpandey/gemma-open-instruct")
# model = AutoPeftModelForCausalLM.from_pretrained("pranjalpandey/llama-7b-finetuned-dialogue-summarizer")
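
# Load the tokenizer from the adapter repo used for fine-tuning.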
tokenizer = AutoTokenizer.from_pretrained("pranjalpandey/gemma-open-instruct", token=HF_TOKEN)
# model = model.to("cuda")  # uncomment to run inference on a GPU
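
# Gradio callback: generate a completion for the user's prompt.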
def response(prompt):
    # Tokenize the prompt and generate up to 100 new tokens.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=100)
    # Decode the generated sequence and keep only the text after the "# Response:" marker
    # emitted by the instruction-tuned model.
    decoded = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
    return decoded.split("# Response:")[1].strip()
ir = gr.Interface(
    fn=response,
    inputs=["text"],
    outputs=["text"],
)
ir.launch()