import gradio as gr
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification, DistilBertTokenizer, DistilBertForSequenceClassification

# ---------------- Original Sarcasm + Sentiment Models ----------------
sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
sarcasm_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews", use_fast=False)
sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
sentiment_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews", use_fast=False)

def analyze_sentiment(sentence):
    inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    sentiment_mapping = {1: "Negative", 0: "Positive"}
    return sentiment_mapping[predicted_class]

def detect_sarcasm(sentence):
    inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sarcasm_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"

def process_text_pipeline(text):
    sentences = text.split("\n")
    processed_sentences = []
    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue
        sentiment = analyze_sentiment(sentence)
        if sentiment == "Negative":
            processed_sentences.append(f"❌ '{sentence}' -> Sentiment: Negative")
        else:
            sarcasm_result = detect_sarcasm(sentence)
            if sarcasm_result == "Sarcasm":
                processed_sentences.append(f"⚠️ '{sentence}' -> Sentiment: Negative (Sarcastic Positive)")
            else:
                processed_sentences.append(f"✅ '{sentence}' -> Sentiment: Positive")
    return "\n".join(processed_sentences)

# ---------------- Additional Sentiment Models (No Sarcasm) ----------------
# Pre-load tokenizers + models at startup so they are ready before the first request
additional_models = {
    "siebert/sentiment-roberta-large-english": {
        "tokenizer": AutoTokenizer.from_pretrained("siebert/sentiment-roberta-large-english"),
        "model": AutoModelForSequenceClassification.from_pretrained("siebert/sentiment-roberta-large-english")
    },
    "assemblyai/bert-large-uncased-sst2": {
        "tokenizer": AutoTokenizer.from_pretrained("assemblyai/bert-large-uncased-sst2"),
        "model": AutoModelForSequenceClassification.from_pretrained("assemblyai/bert-large-uncased-sst2")
    },
    "j-hartmann/sentiment-roberta-large-english-3-classes": {
        "tokenizer": AutoTokenizer.from_pretrained("j-hartmann/sentiment-roberta-large-english-3-classes"),
        "model": AutoModelForSequenceClassification.from_pretrained("j-hartmann/sentiment-roberta-large-english-3-classes")
    },
    "cardiffnlp/twitter-xlm-roberta-base-sentiment": {
        "tokenizer": AutoTokenizer.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment"),
        "model": AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment")
    },
    "sohan-ai/sentiment-analysis-model-amazon-reviews": {
        "tokenizer": DistilBertTokenizer.from_pretrained("distilbert-base-uncased"),
        "model": DistilBertForSequenceClassification.from_pretrained("sohan-ai/sentiment-analysis-model-amazon-reviews")
    }
}

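# Illustrative lookup into the registry above (key taken from the dict as defined):
#   entry = additional_models["siebert/sentiment-roberta-large-english"]
#   tokenizer, model = entry["tokenizer"], entry["model"]
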
def run_sentiment_with_selected_model(text, model_name):
    model_info = additional_models[model_name]
    tokenizer = model_info["tokenizer"]
    model = model_info["model"]
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    probs = F.softmax(logits, dim=-1)
    pred = torch.argmax(probs, dim=-1).item()
    # Custom label mapping for checkpoints that ship without meaningful id2label entries
    label_map = {
        "assemblyai/bert-large-uncased-sst2": {0: "Negative", 1: "Positive"},
        "sohan-ai/sentiment-analysis-model-amazon-reviews": {0: "Negative", 1: "Positive"},
    }
    if model_name in label_map:
        label = label_map[model_name][pred]
    elif model.config.id2label:
        label = model.config.id2label.get(pred, f"LABEL_{pred}")
    else:
        label = f"LABEL_{pred}"
    emoji = "✅" if "positive" in label.lower() else "❌" if "negative" in label.lower() else "⚠️"
    # Add confidence score
    confidence = probs[0][pred].item() * 100
    return f"{emoji} '{text}' -> {label} ({confidence:.1f}%)"

# ---------------- Gradio UI ----------------
background_css = """
.gradio-container {
    background-image: url('https://huggingface.co/spaces/dnzblgn/Sarcasm_Detection/resolve/main/image.png');
    background-size: cover;
    background-position: center;
    color: white;
}
.gr-input, .gr-textbox {
    background-color: rgba(255, 255, 255, 0.3) !important;
    border-radius: 10px;
    padding: 10px;
    color: black !important;
}
h1, h2, p {
    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.8);
}
"""

with gr.Blocks(css=background_css) as interface:
    gr.Markdown(
        """
| <h1 style='text-align: center; font-size: 36px;'>π Sentiment Analysis Powered by Sarcasm Detection π</h1> | |
| <p style='text-align: center; font-size: 18px;'>Analyze the sentiment of customer reviews and detect sarcasm in positive reviews.</p> | |
| """ | |
| ) | |
| with gr.Tab("Text Input"): | |
| with gr.Row(): | |
| text_input = gr.Textbox(lines=10, label="Enter Sentences", placeholder="Enter one or more sentences, each on a new line.") | |
| result_output = gr.Textbox(label="Results", lines=10, interactive=False) | |
        analyze_button = gr.Button("🔍 Analyze")
        analyze_button.click(process_text_pipeline, inputs=text_input, outputs=result_output)

| with gr.Tab("Upload Text File"): | |
| file_input = gr.File(label="Upload Text File") | |
| file_output = gr.Textbox(label="Results", lines=10, interactive=False) | |
| def process_file(file): | |
| text = file.read().decode("utf-8") | |
| return process_text_pipeline(text) | |
| file_input.change(process_file, inputs=file_input, outputs=file_output) | |
| with gr.Tab("Try Other Sentiment Models"): | |
| with gr.Row(): | |
| other_model_selector = gr.Dropdown( | |
| choices=list(additional_models.keys()), | |
| label="Choose a Sentiment Model" | |
| ) | |
| with gr.Row(): | |
| model_text_input = gr.Textbox(lines=5, label="Enter Sentence") | |
| model_result_output = gr.Textbox(label="Sentiment", lines=3, interactive=False) | |
| run_model_btn = gr.Button("Run") | |
| run_model_btn.click(run_sentiment_with_selected_model, inputs=[model_text_input, other_model_selector], outputs=model_result_output) | |
# ---------------- Run App ----------------
if __name__ == "__main__":
    interface.launch()