Update app.py
app.py CHANGED
@@ -23,6 +23,20 @@ APP_CITATION = """
 For citation, please refer to the tool's documentation.
 """
 
+binary_mapping = {
+    'LABEL_0': 'non-hateful',
+    'LABEL_1': 'hateful',
+}
+
+category_mapping = {
+    'LABEL_0': 'non-hateful',
+    'LABEL_1': 'symbolization',
+    'LABEL_2': 'exaggeration/generalization/attribution/distortion',
+    'LABEL_3': 'swearing/insult/defamation/dehumanization',
+    'LABEL_4': 'threat of enmity/war/attack/murder/harm',
+}
+
+
 def inference_t5(input_text, selected_model):
     model = T5ForClassification.from_pretrained("gokceuludogan/turna_tr_hateprint_w0.1_new_") #_b128")
     tokenizer = AutoTokenizer.from_pretrained("gokceuludogan/turna_tr_hateprint_w0.1_new_") #_b128")
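The two dictionaries added above translate the raw `LABEL_n` outputs of the Hugging Face classification pipelines into human-readable strings. A minimal standalone sketch of how such a mapping is applied (the prediction below is a made-up pipeline output, not taken from the app):

```python
# Mirrors the binary_mapping added in this commit; the prediction is hypothetical.
binary_mapping = {
    'LABEL_0': 'non-hateful',
    'LABEL_1': 'hateful',
}

prediction = [{'label': 'LABEL_1', 'score': 0.93}]  # typical pipeline() output shape
print(binary_mapping.get(prediction[0]['label'], 'error'))  # -> 'hateful'
```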
@@ -35,11 +49,11 @@ def perform_binary_classification(input_text, selected_model):
         return inference_t5(input_text, selected_model)
 
     model = pipeline(model=f'gokceuludogan/{selected_model}')
-    return model(input_text)[0]
+    return binary_mapping.get(model(input_text)[0]['label'], 'error')
 
-def perform_categorization(input_text
-    model = pipeline(model=f'gokceuludogan/
-    return model(input_text)[0]
+def perform_categorization(input_text):
+    model = pipeline(model=f'gokceuludogan/berturk_tr_hateprint_cat_w0.1_b128')
+    return category_mapping.get(model(input_text)[0]['label'], 'error')
 
 def perform_target_detection(input_text):
     model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_target')
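The rewritten helpers now return a mapped string instead of the raw pipeline dict, and `perform_categorization` takes only the text while hard-coding the `berturk_tr_hateprint_cat_w0.1_b128` checkpoint. A rough usage sketch, assuming `transformers` is installed, the checkpoint downloads, and `category_mapping` is in scope as defined above (the input sentence is a placeholder):

```python
from transformers import pipeline

# Load the categorization checkpoint named in the diff and classify one text.
categorizer = pipeline(model='gokceuludogan/berturk_tr_hateprint_cat_w0.1_b128')
raw = categorizer("örnek metin")  # placeholder input
print(raw)                        # e.g. [{'label': 'LABEL_2', 'score': 0.87}]

# Unknown labels fall back to 'error' via dict.get(), matching the new helpers.
print(category_mapping.get(raw[0]['label'], 'error'))
```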
@@ -76,33 +90,21 @@ with gr.Blocks(theme="abidlabs/Lime") as hate_speech_demo:
                 inputs=[text_input_binary, model_choice_binary],
                 outputs=classification_output
             )
-            gr.Examples(
-                examples=binary_classification_examples,
-                inputs=[text_input_binary, model_choice_binary],
-                outputs=classification_output,
-                fn=perform_binary_classification
-            )
 
     # Hate Speech Categorization Tab
     with gr.Tab("Hate Speech Categorization"):
         gr.Markdown("Categorize the hate speech type in the provided text.")
         with gr.Column():
-            model_choice_category = "berturk_tr_hateprint_cat_w0.1_b128"
 
             text_input_category = gr.Textbox(label="Input Text")
             categorize_button = gr.Button("Categorize")
             categorization_output = gr.Textbox(label="Categorization Result")
             categorize_button.click(
                 perform_categorization,
-                inputs=[text_input_category
+                inputs=[text_input_category],
                 outputs=categorization_output
             )
-
-                examples=categorization_examples,
-                inputs=[text_input_category, model_choice_category],
-                outputs=categorization_output,
-                fn=perform_categorization
-            )
+
 
     # Target Detection Tab
     with gr.Tab("Target Detection"):
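With `perform_categorization` reduced to a single parameter, the click handler now passes just the text box. A stripped-down, self-contained sketch of that wiring (the stub function stands in for the real classifier; it is not the app's code):

```python
import gradio as gr

def perform_categorization(input_text):
    # Stub standing in for the real pipeline-backed classifier.
    return f"category for: {input_text}"

with gr.Blocks() as demo:
    with gr.Tab("Hate Speech Categorization"):
        text_input_category = gr.Textbox(label="Input Text")
        categorize_button = gr.Button("Categorize")
        categorization_output = gr.Textbox(label="Categorization Result")
        # One input component per function argument, one output component.
        categorize_button.click(
            perform_categorization,
            inputs=[text_input_category],
            outputs=categorization_output,
        )

# demo.launch()  # uncomment to serve locally
```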
@@ -116,12 +118,7 @@ with gr.Blocks(theme="abidlabs/Lime") as hate_speech_demo:
                 inputs=[text_input_target],
                 outputs=target_output
             )
-
-                examples=target_detection_examples,
-                inputs=[text_input_target],
-                outputs=target_output,
-                fn=perform_target_detection
-            )
+
 
     # Multi Detection Tab
     with gr.Tab("Multi Detection"):
@@ -135,12 +132,7 @@ with gr.Blocks(theme="abidlabs/Lime") as hate_speech_demo:
                 inputs=[text_input_multi],
                 outputs=multi_output
             )
-
-                examples=target_detection_examples,
-                inputs=[text_input_multi],
-                outputs=multi_output,
-                fn=perform_multi_detection
-            )
+
 
     # Citation Section
     gr.Markdown(APP_CITATION)
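The last three hunks drop the `gr.Examples` blocks from all four tabs (in the categorization, target, and multi tabs the opening `gr.Examples(` line appears to be missing, so those blocks would not have parsed). If canned examples are reintroduced later, a well-formed call placed inside the relevant tab would look roughly like this; the example strings are placeholders, the other names come from the app:

```python
# Hypothetical, well-formed examples block for the target-detection tab.
gr.Examples(
    examples=["placeholder sentence 1", "placeholder sentence 2"],
    inputs=[text_input_target],
    outputs=target_output,
    fn=perform_target_detection,
)
```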