import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F

# Load model and tokenizer
model_path = "mjpsm/confidence-classifier-updated"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
id2label = model.config.id2label

# Prediction function
def classify_confidence(statement):
    inputs = tokenizer(statement, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    probs = F.softmax(logits, dim=1)
    pred_id = torch.argmax(probs, dim=1).item()
    label = id2label[pred_id]
    confidence_score = probs[0][pred_id].item()
    return f"🧠 Prediction: {label} ({confidence_score:.2%} confidence)"

# Create Gradio interface
iface = gr.Interface(
    fn=classify_confidence,
    inputs=gr.Textbox(lines=4, placeholder="Enter a statement..."),
    outputs="text",
    title="🧠 Confidence Statement Classifier",
    description="Enter a statement to classify its level of confidence using a fine-tuned AI model."
)

if __name__ == "__main__":
    iface.launch()