dnzblgn committed · verified
Commit 831751c · 1 Parent(s): bbd99ff

Update app.py

Files changed (1)
  1. app.py +74 -21
app.py CHANGED
@@ -1,14 +1,15 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
+ import torch.nn.functional as F
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline, DistilBertTokenizer, DistilBertForSequenceClassification

- # Load models and tokenizers
+ # ---------------- Original Sarcasm + Sentiment Models ----------------
  sarcasm_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews")
- sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
  sarcasm_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sarcasm-Detection-Customer-Reviews", use_fast=False)
+
+ sentiment_model = AutoModelForSequenceClassification.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews")
  sentiment_tokenizer = AutoTokenizer.from_pretrained("dnzblgn/Sentiment-Analysis-Customer-Reviews", use_fast=False)

- # Function to analyze sentiment
  def analyze_sentiment(sentence):
      inputs = sentiment_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
      with torch.no_grad():
@@ -18,7 +19,6 @@ def analyze_sentiment(sentence):
      sentiment_mapping = {1: "Negative", 0: "Positive"}
      return sentiment_mapping[predicted_class]

- # Function to detect sarcasm
  def detect_sarcasm(sentence):
      inputs = sarcasm_tokenizer(sentence, return_tensors="pt", truncation=True, padding=True, max_length=512)
      with torch.no_grad():
@@ -27,15 +27,14 @@ def detect_sarcasm(sentence):
      predicted_class = torch.argmax(logits, dim=-1).item()
      return "Sarcasm" if predicted_class == 1 else "Not Sarcasm"

- # Combined function for processing sentences
  def process_text_pipeline(text):
-     sentences = text.split("\n")  # Split text into multiple sentences
+     sentences = text.split("\n")
      processed_sentences = []

      for sentence in sentences:
          sentence = sentence.strip()
          if not sentence:
-             continue  # Skip empty lines
+             continue

          sentiment = analyze_sentiment(sentence)
          if sentiment == "Negative":
@@ -49,7 +48,56 @@ def process_text_pipeline(text):

      return "\n".join(processed_sentences)

- # CSS for transparent boxes and background image
+ # ---------------- Additional Sentiment Models (No Sarcasm) ----------------
+ additional_models = {
+     "siebert/sentiment-roberta-large-english": pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english"),
+     "assemblyai/bert-large-uncased-sst2": AutoModelForSequenceClassification.from_pretrained("assemblyai/bert-large-uncased-sst2"),
+     "j-hartmann/sentiment-roberta-large-english-3-classes": pipeline("text-classification", model="j-hartmann/sentiment-roberta-large-english-3-classes", return_all_scores=True),
+     "cardiffnlp/twitter-xlm-roberta-base-sentiment": pipeline("sentiment-analysis", model="cardiffnlp/twitter-xlm-roberta-base-sentiment", tokenizer="cardiffnlp/twitter-xlm-roberta-base-sentiment"),
+     "sohan-ai/sentiment-analysis-model-amazon-reviews": DistilBertForSequenceClassification.from_pretrained("sohan-ai/sentiment-analysis-model-amazon-reviews")
+ }
+
+ def run_sentiment_with_selected_model(text, model_name):
+     if model_name == "siebert/sentiment-roberta-large-english":
+         result = additional_models[model_name](text)[0]
+         emoji = "✅" if result["label"].lower() == "positive" else "❌"
+         return f"{emoji} '{text}' -> {result['label']}"
+
+     elif model_name == "assemblyai/bert-large-uncased-sst2":
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         model = additional_models[model_name]
+         tokens = tokenizer([text], return_tensors="pt", padding=True, truncation=True)
+         outputs = F.softmax(model(**tokens).logits, dim=1)
+         prob_pos = outputs[0][1].item()
+         prob_neg = outputs[0][0].item()
+         emoji = "✅" if prob_pos > prob_neg else "❌"
+         return f"{emoji} '{text}' -> Positive: {prob_pos:.2%}, Negative: {prob_neg:.2%}"
+
+     elif model_name == "j-hartmann/sentiment-roberta-large-english-3-classes":
+         results = additional_models[model_name](text)[0]
+         label_scores = {res['label']: res['score'] for res in results}
+         label = max(label_scores, key=label_scores.get)
+         emoji = "✅" if "positive" in label.lower() else "❌" if "negative" in label.lower() else "⚠️"
+         score_str = ", ".join([f"{k}: {v:.2%}" for k, v in label_scores.items()])
+         return f"{emoji} '{text}' -> {score_str}"
+
+     elif model_name == "cardiffnlp/twitter-xlm-roberta-base-sentiment":
+         result = additional_models[model_name](text)[0]
+         emoji = "✅" if result["label"].lower() == "positive" else "❌" if result["label"].lower() == "negative" else "⚠️"
+         return f"{emoji} '{text}' -> {result['label']}"
+
+     elif model_name == "sohan-ai/sentiment-analysis-model-amazon-reviews":
+         tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+         model = additional_models[model_name]
+         inputs = tokenizer(text, return_tensors="pt")
+         outputs = model(**inputs)
+         label = "Positive" if outputs.logits.argmax().item() == 1 else "Negative"
+         emoji = "✅" if label == "Positive" else "❌"
+         return f"{emoji} '{text}' -> {label}"
+
+     return f"⚠️ Could not process with selected model."
+
+ # ---------------- Gradio UI ----------------
  background_css = """
  .gradio-container {
      background-image: url('https://huggingface.co/spaces/dnzblgn/Sarcasm_Detection/resolve/main/image.png');
@@ -57,20 +105,17 @@ background_css = """
      background-position: center;
      color: white;
  }
-
  .gr-input, .gr-textbox {
-     background-color: rgba(255, 255, 255, 0.3) !important; /* Make the boxes transparent */
+     background-color: rgba(255, 255, 255, 0.3) !important;
      border-radius: 10px;
      padding: 10px;
      color: black !important;
  }
-
  h1, h2, p {
-     text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.8); /* Add subtle shadow to text for better readability */
+     text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.8);
  }
  """

- # Gradio UI with updated header and transparent design
  with gr.Blocks(css=background_css) as interface:
      gr.Markdown(
          """
@@ -81,14 +126,9 @@ with gr.Blocks(css=background_css) as interface:

      with gr.Tab("Text Input"):
          with gr.Row():
-             text_input = gr.Textbox(
-                 lines=10,
-                 label="Enter Sentences",
-                 placeholder="Enter one or more sentences, each on a new line."
-             )
+             text_input = gr.Textbox(lines=10, label="Enter Sentences", placeholder="Enter one or more sentences, each on a new line.")
              result_output = gr.Textbox(label="Results", lines=10, interactive=False)
          analyze_button = gr.Button("🔍 Analyze")
-
          analyze_button.click(process_text_pipeline, inputs=text_input, outputs=result_output)

      with gr.Tab("Upload Text File"):
@@ -101,6 +141,19 @@ with gr.Blocks(css=background_css) as interface:

          file_input.change(process_file, inputs=file_input, outputs=file_output)

- # Run the interface
+     with gr.Tab("Try Other Sentiment Models (No Sarcasm)"):
+         with gr.Row():
+             other_model_selector = gr.Dropdown(
+                 choices=list(additional_models.keys()),
+                 label="Choose a Sentiment Model"
+             )
+         with gr.Row():
+             model_text_input = gr.Textbox(lines=5, label="Enter Sentence")
+             model_result_output = gr.Textbox(label="Sentiment", lines=3, interactive=False)
+
+         run_model_btn = gr.Button("Run")
+         run_model_btn.click(run_sentiment_with_selected_model, inputs=[model_text_input, other_model_selector], outputs=model_result_output)
+
+ # ---------------- Run App ----------------
  if __name__ == "__main__":
      interface.launch()