import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load models and tokenizers
sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
sarcasm_tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base", use_fast=False)
sentiment_tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", use_fast=False)
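
# Note: the base-model tokenizers (DeBERTa-v3 and RoBERTa) are loaded separately here;
# this assumes the fine-tuned checkpoints above were trained with these same tokenizers.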

# Function to analyze sentiment
def analyze_sentiment(sentence):
    inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    sentiment_mapping = {1: "Negative", 0: "Positive"}  # Label order assumed from the fine-tuned checkpoint
    return sentiment_mapping[predicted_class]

# Function to detect sarcasm
def detect_sarcasm(sentence):
    inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = sarcasm_model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"

# Pipeline: classify sentiment first, then double-check positive predictions for sarcasm
def process_text_pipeline(text):
    sentences = text.split("\n")  # One review per line
    processed_sentences = []

    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:  # Skip blank lines
            continue
        sentiment = analyze_sentiment(sentence)
        if sentiment == "Negative":
            processed_sentences.append(f"'{sentence}' -> Sentiment: Negative")
        else:
            # A positive prediction that the sarcasm model flags is treated as negative
            sarcasm_result = detect_sarcasm(sentence)
            if sarcasm_result == "Sarcasm":
                processed_sentences.append(f"'{sentence}' -> Sentiment: Negative (Sarcastic Positive)")
            else:
                processed_sentences.append(f"'{sentence}' -> Sentiment: Positive")

    return "\n".join(processed_sentences)

# Gradio UI
interface = gr.Interface(
    fn=process_text_pipeline,
    inputs=gr.Textbox(lines=10, placeholder="Enter one or more sentences, each on a new line."),
    outputs="text",
    title="Sarcasm Detection for Customer Reviews",
    description="This web app classifies the sentiment of customer reviews and checks positive predictions for sarcasm.",
)

# Run the interface
if __name__ == "__main__":
    interface.launch()
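
# Gradio can also expose a temporary public URL for testing outside localhost
# by calling interface.launch(share=True) instead.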