import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load model and tokenizer
model_name = "tabularisai/robust-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Move model to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Function to predict sentiment
def predict_sentiment(text):
    # Tokenize the input and move the tensors to the same device as the model
    inputs = tokenizer(text.lower(), return_tensors="pt", truncation=True, padding=True, max_length=512)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to probabilities and pick the most likely of the five classes
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    predicted_class = torch.argmax(probabilities, dim=-1).item()

    sentiment_map = {0: "Very Negative", 1: "Negative", 2: "Neutral", 3: "Positive", 4: "Very Positive"}
    confidence = probabilities[0][predicted_class].item()
    return sentiment_map[predicted_class], f"{confidence:.2%}"

# Wrapper that formats the prediction for the Gradio UI
def gradio_sentiment_analysis(text):
    sentiment, confidence = predict_sentiment(text)
    return f"Sentiment: {sentiment}\nConfidence: {confidence}"

# Create Gradio interface
iface = gr.Interface(
    fn=gradio_sentiment_analysis,
    inputs=gr.Textbox(lines=5, label="Enter text for sentiment analysis"),
    outputs=gr.Textbox(label="Result"),
    title="Sentiment Analysis",
    description="Analyze the sentiment of your text using a 5-class sentiment model.",
    theme="huggingface",
    examples=[
        ["I absolutely loved this movie! The acting was superb and the plot was engaging."],
        ["The service at this restaurant was terrible. I'll never go back."],
        ["The product works as expected. Nothing special, but it gets the job done."],
        ["I'm somewhat disappointed with my purchase. It's not as good as I hoped."],
        ["This book changed my life! I couldn't put it down and learned so much."]
    ]
)

# Launch the app
iface.launch()
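
# Note (illustrative only, not part of the app): predict_sentiment can also be
# exercised directly in a Python session once the model has loaded, e.g.
#
#   sentiment, confidence = predict_sentiment("I absolutely loved this movie!")
#   print(sentiment, confidence)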