Update app.py
app.py CHANGED
@@ -1,98 +1,143 @@
(Removed: the previous 98-line version of app.py. Only fragments of the deleted lines are recoverable from this view: it additionally imported `spaces`, defined a single `target_example` list with the same Turkish target-detection sentence, rendered a `DESCRIPTION` constant via `gr.Markdown(DESCRIPTION)`, and wired the same four tabs (Binary Classification, Hate Speech Categorization, Target Detection, Multi Detection) to helper functions whose names are truncated here. The updated 143-line app.py follows.)
import gradio as gr
from transformers import pipeline
import os

# Retrieve Hugging Face authentication token from environment variables
hf_auth_token = os.getenv('HF_AUTH_TOKEN')
print(hf_auth_token)
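# Note (assumption, not asserted by the original code): hf_auth_token is read and printed but
# never passed to the pipelines below. If the gokceuludogan/* checkpoints were gated or private,
# it would presumably need to be forwarded, e.g. pipeline(model=..., token=hf_auth_token).
# Printing the raw token also echoes the secret into the Space logs.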
# Example inputs for the different tasks
binary_classification_examples = [["Yahudi terörüne karşı protestolar kararlılıkla devam ediyor."]]
categorization_examples = [["Ermeni zulmü sırasında hayatını kaybeden kadınlar anısına dikilen anıt ziyarete açıldı."]]
target_detection_examples = [["Dün 5 bin suriyeli enik doğmuştur zaten Türkiyede aq 5 bin suriyelinin gitmesi çok çok az"]]

# Application description and citation placeholder
APP_DESCRIPTION = """
## Hate Speech Detection in Turkish News

This tool performs hate speech detection across several tasks, including binary classification, categorization, and target detection. Choose a model and input text to analyze its hatefulness, categorize it, or detect targets of hate speech.
"""

APP_CITATION = """
For citation, please refer to the tool's documentation.
"""

# Functions for model-based tasks
def perform_binary_classification(input_text, selected_model):
    model = pipeline(model=f'gokceuludogan/{selected_model}')
    return model(input_text)[0]

def perform_categorization(input_text, selected_model):
    model = pipeline(model=f'gokceuludogan/{selected_model}')
    return model(input_text)[0]

def perform_target_detection(input_text):
    model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_target')
    return model(input_text)[0]['generated_text']

def perform_multi_detection(input_text):
    model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_multi')
    return model(input_text)[0]['generated_text']
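# Note on the return values above: a transformers pipeline returns a list with one result dict
# per input, which is why each helper indexes [0]. Assuming the turna_generation_* checkpoints
# run as text2text-generation pipelines, that dict carries a 'generated_text' key, hence the
# [0]['generated_text'] indexing in the detection helpers.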
# Gradio interface
with gr.Blocks(theme="abidlabs/Lime") as hate_speech_demo:

    # Main description
    with gr.Tab("About"):
        gr.Markdown(APP_DESCRIPTION)

    # Binary Classification Tab
    with gr.Tab("Binary Classification"):
        gr.Markdown("Analyze the hatefulness of a given text using selected models.")
        with gr.Column():
            model_choice_binary = gr.Radio(
                choices=[
                    "turna_tr_hateprint",
                    "turna_tr_hateprint_5e6_w0.1_",
                    "berturk_tr_hateprint_w0.1",
                    "berturk_tr_hateprint_w0.1_b128"
                ],
                label="Select Model",
                value="turna_tr_hateprint"
            )
            text_input_binary = gr.Textbox(label="Input Text")
            classify_button = gr.Button("Analyze")
            classification_output = gr.Textbox(label="Classification Result")
            classify_button.click(
                perform_binary_classification,
                inputs=[text_input_binary, model_choice_binary],
                outputs=classification_output
            )
            gr.Examples(
                examples=binary_classification_examples,
                inputs=[text_input_binary, model_choice_binary],
                outputs=classification_output,
                fn=perform_binary_classification
            )

    # Hate Speech Categorization Tab
    with gr.Tab("Hate Speech Categorization"):
        gr.Markdown("Categorize the hate speech type in the provided text.")
        with gr.Column():
            model_choice_category = gr.Radio(
                choices=["berturk_tr_hateprint_cat_w0.1_b128", "berturk_tr_hateprint_cat_w0.1"],
                label="Select Model"
            )
            text_input_category = gr.Textbox(label="Input Text")
            categorize_button = gr.Button("Categorize")
            categorization_output = gr.Textbox(label="Categorization Result")
            categorize_button.click(
                perform_categorization,
                inputs=[text_input_category, model_choice_category],
                outputs=categorization_output
            )
            gr.Examples(
                examples=categorization_examples,
                inputs=[text_input_category, model_choice_category],
                outputs=categorization_output,
                fn=perform_categorization
            )

    # Target Detection Tab
    with gr.Tab("Target Detection"):
        gr.Markdown("Detect the targets of hate speech in the provided text.")
        with gr.Column():
            text_input_target = gr.Textbox(label="Input Text")
            target_button = gr.Button("Detect Targets")
            target_output = gr.Textbox(label="Target Detection Result")
            target_button.click(
                perform_target_detection,
                inputs=[text_input_target],
                outputs=target_output
            )
            gr.Examples(
                examples=target_detection_examples,
                inputs=[text_input_target],
                outputs=target_output,
                fn=perform_target_detection
            )

    # Multi Detection Tab
    with gr.Tab("Multi Detection"):
        gr.Markdown("Detect hate speech, its category, and its targets in the text.")
        with gr.Column():
            text_input_multi = gr.Textbox(label="Input Text")
            multi_button = gr.Button("Detect All")
            multi_output = gr.Textbox(label="Multi Detection Result")
            multi_button.click(
                perform_multi_detection,
                inputs=[text_input_multi],
                outputs=multi_output
            )
            gr.Examples(
                examples=target_detection_examples,
                inputs=[text_input_multi],
                outputs=multi_output,
                fn=perform_multi_detection
            )

    # Citation Section
    gr.Markdown(APP_CITATION)

# Launch the application
hate_speech_demo.launch()
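One possible follow-up, sketched here as a suggestion rather than part of the committed app.py: each button click above constructs a fresh pipeline, so every request reloads (or re-downloads) the model. A small cached loader, reusing the same gokceuludogan/* checkpoint names, would keep one pipeline per model in memory:

from functools import lru_cache

from transformers import pipeline


@lru_cache(maxsize=None)
def get_pipeline(model_name: str):
    # First call for a given checkpoint loads it; later calls reuse the cached pipeline.
    return pipeline(model=f'gokceuludogan/{model_name}')


def perform_binary_classification(input_text, selected_model):
    # Same interface as the helper above, without rebuilding the pipeline on every request.
    return get_pipeline(selected_model)(input_text)[0]


def perform_target_detection(input_text):
    return get_pipeline('turna_generation_tr_hateprint_target')(input_text)[0]['generated_text']

The other helpers would follow the same pattern; the trade-off is holding the selected models in memory for the lifetime of the Space instead of paying the load time on every click.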