import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load models and tokenizers
sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
sarcasm_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews", use_fast=False)
sentiment_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews", use_fast=False)

# Function to analyze sentiment
def analyze_sentiment(sentence):
    inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    sentiment_mapping = {1: "Negative", 0: "Positive"}
    return sentiment_mapping[predicted_class]

# Function to detect sarcasm
def detect_sarcasm(sentence):
    inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sarcasm_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"

# Combined function for processing sentences
def process_text_pipeline(text):
    sentences = text.split("\n")  # Split text into multiple sentences
    processed_sentences = []
    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue  # Skip empty lines
        sentiment = analyze_sentiment(sentence)
        if sentiment == "Negative":
            processed_sentences.append(f"❌ '{sentence}' -> Sentiment: Negative")
        else:
            sarcasm_result = detect_sarcasm(sentence)
            if sarcasm_result == "Sarcasm":
                processed_sentences.append(f"⚠️ '{sentence}' -> Sentiment: Negative (Sarcastic Positive)")
            else:
                processed_sentences.append(f"✅ '{sentence}' -> Sentiment: Positive")
    return "\n".join(processed_sentences)

# Improved Gradio UI
with gr.Blocks(css=".gradio-container {background-color: #f3f4f6; color: #333; font-family: 'Arial';}") as interface:
    gr.Markdown(
        """
        <h1 style='text-align: center; font-size: 36px;'>Sarcasm Detection for Customer Reviews</h1>
        <p style='text-align: center; font-size: 18px;'>This web app analyzes customer reviews for sentiment and detects sarcasm in positive reviews.</p>
        """
    )
with gr.Tab("Text Input"): | |
with gr.Row(): | |
text_input = gr.Textbox( | |
lines=10, | |
label="Enter Sentences", | |
placeholder="Enter one or more sentences, each on a new line." | |
) | |
result_output = gr.Textbox(label="Results", lines=10, interactive=False) | |
analyze_button = gr.Button("π Analyze", elem_id="analyze-button") | |
analyze_button.click(process_text_pipeline, inputs=text_input, outputs=result_output) | |
with gr.Tab("Upload Text File"): | |
file_input = gr.File(label="Upload Text File") | |
file_output = gr.Textbox(label="Results", lines=10, interactive=False) | |
def process_file(file): | |
text = file.read().decode("utf-8") | |
return process_text_pipeline(text) | |
file_input.change(process_file, inputs=file_input, outputs=file_output) | |

    gr.Markdown(
        """
        <p style='text-align: center;'>Made with ❤️ by <a href='https://huggingface.co/dnzblgn' target='_blank'>dnzblgn</a></p>
        """
    )

# Run the interface
if __name__ == "__main__":
    interface.launch()