# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face
# Spaces page-status text captured by the scrape, not part of the program.
# Converted to a comment so the module parses.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F

# Load the fine-tuned excuse-classifier checkpoint and its tokenizer from the
# Hugging Face Hub (downloaded/cached on first run).
model_path = "mjpsm/excuses-classifier-model"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Mapping from class index -> human-readable label, taken from the model config.
id2label = model.config.id2label
# Prediction function used by the Gradio interface.
def classify_excuse(text):
    """Classify an excuse string with the loaded model.

    Args:
        text: Raw excuse text entered by the user.

    Returns:
        A pair of display strings: "Prediction: <label>" and
        "Confidence: <probability to 4 decimal places>".
    """
    # Tokenize to PyTorch tensors; truncate overly long inputs to the
    # model's maximum sequence length.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():  # inference only — skip gradient tracking
        outputs = model(**inputs)
    # Convert raw logits to a probability distribution over the labels.
    probs = F.softmax(outputs.logits, dim=1)
    pred_id = torch.argmax(probs, dim=1).item()
    confidence = probs[0][pred_id].item()
    label = id2label[pred_id]
    return f"Prediction: {label}", f"Confidence: {confidence:.4f}"
# Gradio Interface: one textbox in, two text panes out (label + confidence).
interface = gr.Interface(
    fn=classify_excuse,
    inputs=gr.Textbox(lines=4, placeholder="Enter your Zoom excuse here..."),
    outputs=["text", "text"],
    title="🧠 Zoom Excuse Classifier",
    description="Classify a Zoom excuse as either 'reasonable' or 'unreasonable' using a fine-tuned AI model.",
)

# Launch the app only when run as a script (not when imported as a module).
if __name__ == "__main__":
    interface.launch()