from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import gradio as gr

# Load the tokenizer and model.
# Note: "bert-base-uncased" ships without a fine-tuned classification head, so the
# head is randomly initialized here; swap in a fine-tuned checkpoint for meaningful labels.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.eval()  # disable dropout for deterministic inference

def classify_text(text):
    # Tokenize the input and run a forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=1).item()
    return f"Predicted class: {predicted_class}"

# Wrap the classifier in a simple Gradio text-in/text-out interface.
interface = gr.Interface(
    fn=classify_text,
    inputs="text",
    outputs="text",
    title="BERT Text Classifier",
)

interface.launch()