import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load models and tokenizers
sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
sarcasm_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews", use_fast=False)
sentiment_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews", use_fast=False)
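# Note: from_pretrained downloads and caches the weights on first run and returns
# the models already in eval mode, so inference-time dropout is not a concern here.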

# Function to analyze sentiment
def analyze_sentiment(sentence):
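    # Tokenize into a single-example batch, truncating long reviews to the model's 512-token limit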
    inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    sentiment_mapping = {1: "Negative", 0: "Positive"}
    return sentiment_mapping[predicted_class]

# Function to detect sarcasm
def detect_sarcasm(sentence):
    inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sarcasm_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"

# Combined function for processing sentences
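# Negative sentences are reported directly; sentences classified as positive are
# re-checked for sarcasm, and a sarcastic "positive" is flagged as effectively negative.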
def process_text_pipeline(text):
    sentences = text.split("\n")  # Treat each non-empty line as a separate sentence/review
    processed_sentences = []

    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue  # Skip empty lines

        sentiment = analyze_sentiment(sentence)
        if sentiment == "Negative":
            processed_sentences.append(f"❌ '{sentence}' -> Sentiment: Negative")
        else:
            sarcasm_result = detect_sarcasm(sentence)
            if sarcasm_result == "Sarcasm":
                processed_sentences.append(f"⚠️ '{sentence}' -> Sentiment: Negative (Sarcastic Positive)")
            else:
                processed_sentences.append(f"✅ '{sentence}' -> Sentiment: Positive")

    return "\n".join(processed_sentences)
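
# Illustrative examples of the output format (actual labels depend on the models):
#   "The product arrived broken."        -> ❌ '...' -> Sentiment: Negative
#   "Oh great, it broke after one day."  -> ⚠️ '...' -> Sentiment: Negative (Sarcastic Positive)
#   "Works exactly as described."        -> ✅ '...' -> Sentiment: Positive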

# CSS for transparent boxes and background image
background_css = """
.gradio-container {
    background-image: url('https://huggingface.co/spaces/dnzblgn/Sarcasm_Detection/resolve/main/image.png');
    background-size: cover;
    background-position: center;
    color: white;
}

.gr-input, .gr-textbox {
    background-color: rgba(255, 255, 255, 0.3) !important;  /* Semi-transparent input boxes */
    border-radius: 10px;
    padding: 10px;
    color: black !important;
}

h1, h2, p {
    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.8);  /* Add subtle shadow to text for better readability */
}
"""

# Gradio UI with updated header and transparent design
with gr.Blocks(css=background_css) as interface:
    gr.Markdown(
        """
        <h1 style='text-align: center; font-size: 36px;'>🌟 Sentiment Analysis Powered by Sarcasm Detection 🌟</h1>
        <p style='text-align: center; font-size: 18px;'>Analyze the sentiment of customer reviews and detect sarcasm in positive reviews.</p>
        """
    )

    with gr.Tab("Text Input"):
        with gr.Row():
            text_input = gr.Textbox(
                lines=10,
                label="Enter Sentences",
                placeholder="Enter one or more sentences, each on a new line."
            )
            result_output = gr.Textbox(label="Results", lines=10, interactive=False)
        analyze_button = gr.Button("🔍 Analyze")

        analyze_button.click(process_text_pipeline, inputs=text_input, outputs=result_output)

    with gr.Tab("Upload Text File"):
        file_input = gr.File(label="Upload Text File")
        file_output = gr.Textbox(label="Results", lines=10, interactive=False)

        def process_file(file):
            # gr.File may pass a tempfile-like object or a plain filepath string,
            # depending on the Gradio version; handle both cases here.
            path = file.name if hasattr(file, "name") else file
            with open(path, "r", encoding="utf-8") as f:
                return process_text_pipeline(f.read())

        file_input.change(process_file, inputs=file_input, outputs=file_output)

# Run the interface
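# launch() starts a local web server; passing share=True would also create a temporary public link.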
if __name__ == "__main__":
    interface.launch()