import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned sequence-classification checkpoint and its tokenizer
# from the Hugging Face Hub.
model_id = "cheberle/autotrain-35swc-b4r9z"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
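# Both artifacts are cached locally after the first download (by default
# under ~/.cache/huggingface), so later launches skip the network fetch.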

# Run on a GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
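# Note: from_pretrained() already returns the model in eval mode, so an
# explicit model.eval() call is not needed before inference.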

def predict(text):
    # Tokenize the input, truncating to the model's 512-token limit.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

    # Move the input tensors to the same device as the model.
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Forward pass without gradient tracking; softmax converts the raw
    # logits into class probabilities.
    with torch.no_grad():
        outputs = model(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    probs = predictions[0].tolist()
    labels = model.config.id2label

    # Map each class label to its probability, the dict format gr.Label expects.
    results = {labels[i]: float(probs[i]) for i in range(len(probs))}

    return results
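
# Quick sanity check (hypothetical output; the real label names and scores
# depend on this model's fine-tuning):
#
#   >>> predict("Example text to try")
#   {'LABEL_0': 0.07, 'LABEL_1': 0.93}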

# Wire the prediction function into a simple Gradio UI.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Label(label="Prediction"),
    title="Model Prediction Interface",
    description=f"Enter text to get predictions from {model_id}",
    examples=["Example text to try"],
)

iface.launch()
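# If no public URL is available (e.g. when running from a notebook), Gradio
# can create a temporary shareable link with iface.launch(share=True).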