# HFCompareModel / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from huggingface_hub import login
import os
# Load a text-generation model and its tokenizer from the Hugging Face Hub
def load_model(model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model
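# Optional variant (an assumption, not part of the original Space): larger checkpoints
# could be loaded in half precision and placed on available devices automatically, e.g.:
#   model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
# (device_map="auto" requires the `accelerate` package to be installed.)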
# Models to compare
original_model_name = "Vishwas1/hummingbird-base-marathi-finetuned"
fine_tuned_model_name = "Vishwas1/hummingbird-base-marathi-finetuned-finetuned"
# Load Hugging Face token
hf_token = os.getenv('HF_API_TOKEN')
if not hf_token:
    raise ValueError("Hugging Face token not found. Please set the HF_API_TOKEN environment variable.")
# Login to Hugging Face Hub
login(hf_token)
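# login() stores the token for this session so that from_pretrained() can also
# access private or gated repositories owned by this account.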
# Load the original and fine-tuned models
original_tokenizer, original_model = load_model(original_model_name)
fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)
# Ensure models are in evaluation mode
original_model.eval()
fine_tuned_model.eval()
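# eval() disables dropout and other training-only behaviour; together with the
# torch.no_grad() blocks in compare_models below, generation runs without gradient tracking.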
# Function to compare text generation from both models
def compare_models(prompt):
    # Generate text with the original model (pass the full encoding so the
    # attention mask is used as well)
    inputs_orig = original_tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        generated_ids_orig = original_model.generate(**inputs_orig, max_length=100)
    generated_text_orig = original_tokenizer.decode(generated_ids_orig[0], skip_special_tokens=True)

    # Generate text with the fine-tuned model
    inputs_fine = fine_tuned_tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        generated_ids_fine = fine_tuned_model.generate(**inputs_fine, max_length=100)
    generated_text_fine = fine_tuned_tokenizer.decode(generated_ids_fine[0], skip_special_tokens=True)

    # Return the generated text from both models for comparison
    result = {
        "Original Model Output": generated_text_orig,
        "Fine-Tuned Model Output": generated_text_fine,
    }
    return result
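# Quick check outside the UI (hypothetical, not part of the original app): calling the
# function directly returns a dict with both generations, e.g.:
#   print(compare_models("नमस्कार"))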
# Gradio Interface
iface = gr.Interface(
    fn=compare_models,
    inputs=gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
    outputs=gr.JSON(label="Generated Texts"),
    title="Compare Text Generation from Original and Fine-Tuned Models",
    description="Enter a prompt to generate text from the original and fine-tuned models.",
)
iface.launch()