gokceuludogan committed on
Commit 572f0ae · verified · 1 Parent(s): 8b6ab08

Update app.py

Files changed (1)
  1. app.py +117 -72
app.py CHANGED
@@ -1,98 +1,143 @@
-
  import gradio as gr
- import spaces
  from transformers import pipeline
- import torch
- import os


- token = os.getenv('HF_AUTH_TOKEN')
- print(token)
- binary_example = [["Yahudi terörüne karşı protestolar kararlılıkla devam ediyor."]]
- category_example = [["Ermeni zulmü sırasında hayatını kaybeden kadınlar anısına dikilen anıt ziyarete açıldı."]]
- target_example = [["Dün 5 bin suriyeli enik doğmuştur zaten Türkiyede aq 5 bin suriyelinin gitmesi çok çok az"]]

- DESCRIPTION = """"
  ## Hate Speech Detection in Turkish News
  """

- CITATION = """"
  """

- def binary_classification(input, choice):
-     model = pipeline(model=f'gokceuludogan/{choice}')
-     return model(input)[0]

- def category_classification(input, choice):
-     model = pipeline(model=f'gokceuludogan/{choice}')
-     return model(input)[0]

- def target_detection(input):
      model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_target')
-     return model(input)[0]['generated_text']

- def multi_detection(input):
      model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_multi')
-     return model(input)[0]['generated_text']

- with gr.Blocks(theme="abidlabs/Lime") as demo:

-     #gr.Markdown("# TURNA")
-     #gr.Image("images/turna-logo.png", width=100, show_label=False, show_download_button=False, show_share_button=False)

-     with gr.Tab("TRHatePrint"):
-         gr.Markdown(DESCRIPTION)
-
      with gr.Tab("Binary Classification"):
-         gr.Markdown("Enter text to analyse hatefulness and pick the model.")
          with gr.Column():
-             with gr.Row():
-                 with gr.Column():
-                     sentiment_choice = gr.Radio(choices = ["turna_tr_hateprint", "turna_tr_hateprint_5e6_w0.1_", "berturk_tr_hateprint_w0.1", "berturk_tr_hateprint_w0.1_b128"], label ="Model", value="turna_tr_hateprint")
-                     sentiment_input = gr.Textbox(label="Input")
-
-             sentiment_submit = gr.Button()
-             sentiment_output = gr.Textbox(label="Output")
-             sentiment_submit.click(binary_classification, inputs=[sentiment_input, sentiment_choice], outputs=sentiment_output)
-             sentiment_examples = gr.Examples(examples = binary_example, inputs = [sentiment_input, sentiment_choice], outputs=sentiment_output, fn=binary_classification)
-
      with gr.Tab("Hate Speech Categorization"):
-         gr.Markdown("Enter a hateful text to categorize or try the example.")
          with gr.Column():
-             with gr.Row():
-                 with gr.Column():
-                     text_choice = gr.Radio(choices= ["berturk_tr_hateprint_cat_w0.1_b128", "berturk_tr_hateprint_cat_w0.1"])
-                     text_input = gr.Textbox(label="Input")
-
-             text_submit = gr.Button()
-             text_output = gr.Textbox(label="Output")
-             text_submit.click(category_classification, inputs=[text_input, text_choice], outputs=text_output)
-             text_examples = gr.Examples(examples = category_example,inputs=[text_input, text_choice], outputs=text_output, fn=category_classification)
-
-
      with gr.Tab("Target Detection"):
-         gr.Markdown("Enter text to detect targets ")
          with gr.Column():
-             with gr.Row():
-                 with gr.Column():
-                     nli_first_input = gr.Textbox(label="Input")
-             nli_submit = gr.Button()
-             nli_output = gr.Textbox(label="Output")
-             nli_submit.click(target_detection, inputs=[nli_first_input], outputs=nli_output)
-             nli_examples = gr.Examples(examples = target_example, inputs = [nli_first_input], outputs=nli_output, fn=target_detection)
-
-
      with gr.Tab("Multi Detection"):
-         gr.Markdown("Enter text to detect hate, category, and targets ")
          with gr.Column():
-             with gr.Row():
-                 with gr.Column():
-                     nli_first_input = gr.Textbox(label="Input")
-             nli_submit = gr.Button()
-             nli_output = gr.Textbox(label="Output")
-             nli_submit.click(multi_detection, inputs=[nli_first_input], outputs=nli_output)
-             nli_examples = gr.Examples(examples = target_example, inputs = [nli_first_input], outputs=nli_output, fn=multi_detection)
-
-     gr.Markdown(CITATION)
-
- demo.launch()
  import gradio as gr
  from transformers import pipeline
+ import os

+ # Retrieve Hugging Face authentication token from environment variables
+ hf_auth_token = os.getenv('HF_AUTH_TOKEN')
+ print(hf_auth_token)

+ # Example inputs for the different tasks
+ binary_classification_examples = [["Yahudi terörüne karşı protestolar kararlılıkla devam ediyor."]]
+ categorization_examples = [["Ermeni zulmü sırasında hayatını kaybeden kadınlar anısına dikilen anıt ziyarete açıldı."]]
+ target_detection_examples = [["Dün 5 bin suriyeli enik doğmuştur zaten Türkiyede aq 5 bin suriyelinin gitmesi çok çok az"]]

+ # Application description and citation placeholder
+ APP_DESCRIPTION = """
  ## Hate Speech Detection in Turkish News
+
+ This tool performs hate speech detection across several tasks, including binary classification, categorization, and target detection. Choose a model and input text to analyze its hatefulness, categorize it, or detect targets of hate speech.
  """

+ APP_CITATION = """
+ For citation, please refer to the tool's documentation.
  """

+ # Functions for model-based tasks
+ def perform_binary_classification(input_text, selected_model):
+     model = pipeline(model=f'gokceuludogan/{selected_model}')
+     return model(input_text)[0]

+ def perform_categorization(input_text, selected_model):
+     model = pipeline(model=f'gokceuludogan/{selected_model}')
+     return model(input_text)[0]

+ def perform_target_detection(input_text):
      model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_target')
+     return model(input_text)[0]['generated_text']

+ def perform_multi_detection(input_text):
      model = pipeline(model='gokceuludogan/turna_generation_tr_hateprint_multi')
+     return model(input_text)[0]['generated_text']

+ # Gradio interface
+ with gr.Blocks(theme="abidlabs/Lime") as hate_speech_demo:

+     # Main description
+     with gr.Tab("About"):
+         gr.Markdown(APP_DESCRIPTION)

+     # Binary Classification Tab
      with gr.Tab("Binary Classification"):
+         gr.Markdown("Analyze the hatefulness of a given text using selected models.")
          with gr.Column():
+             model_choice_binary = gr.Radio(
+                 choices=[
+                     "turna_tr_hateprint",
+                     "turna_tr_hateprint_5e6_w0.1_",
+                     "berturk_tr_hateprint_w0.1",
+                     "berturk_tr_hateprint_w0.1_b128"
+                 ],
+                 label="Select Model",
+                 value="turna_tr_hateprint"
+             )
+             text_input_binary = gr.Textbox(label="Input Text")
+             classify_button = gr.Button("Analyze")
+             classification_output = gr.Textbox(label="Classification Result")
+             classify_button.click(
+                 perform_binary_classification,
+                 inputs=[text_input_binary, model_choice_binary],
+                 outputs=classification_output
+             )
+             gr.Examples(
+                 examples=binary_classification_examples,
+                 inputs=[text_input_binary, model_choice_binary],
+                 outputs=classification_output,
+                 fn=perform_binary_classification
+             )
+
+     # Hate Speech Categorization Tab
      with gr.Tab("Hate Speech Categorization"):
+         gr.Markdown("Categorize the hate speech type in the provided text.")
          with gr.Column():
+             model_choice_category = gr.Radio(
+                 choices=["berturk_tr_hateprint_cat_w0.1_b128", "berturk_tr_hateprint_cat_w0.1"],
+                 label="Select Model"
+             )
+             text_input_category = gr.Textbox(label="Input Text")
+             categorize_button = gr.Button("Categorize")
+             categorization_output = gr.Textbox(label="Categorization Result")
+             categorize_button.click(
+                 perform_categorization,
+                 inputs=[text_input_category, model_choice_category],
+                 outputs=categorization_output
+             )
+             gr.Examples(
+                 examples=categorization_examples,
+                 inputs=[text_input_category, model_choice_category],
+                 outputs=categorization_output,
+                 fn=perform_categorization
+             )
+
+     # Target Detection Tab
      with gr.Tab("Target Detection"):
+         gr.Markdown("Detect the targets of hate speech in the provided text.")
          with gr.Column():
+             text_input_target = gr.Textbox(label="Input Text")
+             target_button = gr.Button("Detect Targets")
+             target_output = gr.Textbox(label="Target Detection Result")
+             target_button.click(
+                 perform_target_detection,
+                 inputs=[text_input_target],
+                 outputs=target_output
+             )
+             gr.Examples(
+                 examples=target_detection_examples,
+                 inputs=[text_input_target],
+                 outputs=target_output,
+                 fn=perform_target_detection
+             )
+
+     # Multi Detection Tab
      with gr.Tab("Multi Detection"):
+         gr.Markdown("Detect hate speech, its category, and its targets in the text.")
          with gr.Column():
+             text_input_multi = gr.Textbox(label="Input Text")
+             multi_button = gr.Button("Detect All")
+             multi_output = gr.Textbox(label="Multi Detection Result")
+             multi_button.click(
+                 perform_multi_detection,
+                 inputs=[text_input_multi],
+                 outputs=multi_output
+             )
+             gr.Examples(
+                 examples=target_detection_examples,
+                 inputs=[text_input_multi],
+                 outputs=multi_output,
+                 fn=perform_multi_detection
+             )
+
+     # Citation Section
+     gr.Markdown(APP_CITATION)
+
+ # Launch the application
+ hate_speech_demo.launch()
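Note on the new version: it reads `HF_AUTH_TOKEN` from the environment but only prints it; the token is never passed to `pipeline()`, and every button click rebuilds a pipeline from scratch. A minimal sketch of how both points could be handled is below, assuming the deployed `transformers` version accepts a `token` argument in `pipeline()`; the `get_pipeline` helper is hypothetical and not part of this commit.

```python
import os
from functools import lru_cache

from transformers import pipeline

# Read the token once; avoid printing it, since it is a secret.
hf_auth_token = os.getenv('HF_AUTH_TOKEN')


@lru_cache(maxsize=None)
def get_pipeline(model_name: str):
    # Hypothetical helper: build each pipeline once and reuse it across clicks.
    # The `token=` keyword is assumed to be supported by the installed transformers version.
    return pipeline(model=f'gokceuludogan/{model_name}', token=hf_auth_token)


def perform_binary_classification(input_text, selected_model):
    return get_pipeline(selected_model)(input_text)[0]
```

Caching with `functools.lru_cache` keeps one pipeline per model name in memory, so model weights are not reloaded on every request.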