import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Load models and tokenizers
sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
sarcasm_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews", use_fast=False)
sentiment_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews", use_fast=False)
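# Optional sketch (an addition, not part of the original app): run inference on GPU when one is
# available. The functions below keep everything on CPU; if the models are moved to a device,
# the tokenized inputs would also need .to(device), so the move is left commented out here.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# sarcasm_model.to(device)
# sentiment_model.to(device)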
# Function to analyze sentiment
def analyze_sentiment(sentence):
    inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    sentiment_mapping = {1: "Negative", 0: "Positive"}
    return sentiment_mapping[predicted_class]
# Function to detect sarcasm
def detect_sarcasm(sentence):
    inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sarcasm_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"
# Combined function for processing sentences
def process_text_pipeline(text):
    sentences = text.split("\n")  # Split input into lines, one sentence per line
    processed_sentences = []

    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue  # Skip empty lines

        sentiment = analyze_sentiment(sentence)
        if sentiment == "Negative":
            processed_sentences.append(f"❌ '{sentence}' -> Sentiment: Negative")
        else:
            sarcasm_result = detect_sarcasm(sentence)
            if sarcasm_result == "Sarcasm":
                processed_sentences.append(f"⚠️ '{sentence}' -> Sentiment: Negative (Sarcastic Positive)")
            else:
                processed_sentences.append(f"✅ '{sentence}' -> Sentiment: Positive")

    return "\n".join(processed_sentences)
# CSS for transparent boxes and background image
background_css = """
.gradio-container {
    background-image: url('https://huggingface.co/spaces/dnzblgn/Sarcasm_Detection/resolve/main/image.png');
    background-size: cover;
    background-position: center;
    color: white;
}
.gr-input, .gr-textbox {
    background-color: rgba(255, 255, 255, 0.3) !important; /* Semi-transparent input boxes */
    border-radius: 10px;
    padding: 10px;
    color: black !important;
}
h1, h2, p {
    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.8); /* Subtle shadow for better readability */
}
"""
# Gradio UI with updated header and transparent design
with gr.Blocks(css=background_css) as interface:
    gr.Markdown(
        """
        <h1 style='text-align: center; font-size: 36px;'>🌟 Sentiment Analysis Powered by Sarcasm Detection 🌟</h1>
        <p style='text-align: center; font-size: 18px;'>Analyze the sentiment of customer reviews and detect sarcasm in positive reviews.</p>
        """
    )

    with gr.Tab("Text Input"):
        with gr.Row():
            text_input = gr.Textbox(
                lines=10,
                label="Enter Sentences",
                placeholder="Enter one or more sentences, each on a new line."
            )
            result_output = gr.Textbox(label="Results", lines=10, interactive=False)
        analyze_button = gr.Button("🔍 Analyze")
        analyze_button.click(process_text_pipeline, inputs=text_input, outputs=result_output)
    with gr.Tab("Upload Text File"):
        file_input = gr.File(label="Upload Text File")
        file_output = gr.Textbox(label="Results", lines=10, interactive=False)

        def process_file(file):
            if file is None:
                return ""
            # gr.File may pass a filepath string or a file object with a .name path, depending on the Gradio version.
            path = file if isinstance(file, str) else file.name
            with open(path, "r", encoding="utf-8") as f:
                text = f.read()
            return process_text_pipeline(text)

        file_input.change(process_file, inputs=file_input, outputs=file_output)
# Run the interface
if __name__ == "__main__":
    interface.launch()