File size: 2,326 Bytes
fa9917c
c7baec5
fa9917c
 
df18eaf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45d0f71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
df18eaf
c85af71
 
 
45d0f71
77e17ee
 
 
 
 
 
 
 
 
 
 
45d0f71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import gradio as gr
import torch
from transformers import pipeline

# --- Static UI copy ---------------------------------------------------------

app_title = "Portuguese Hate Speech Detection"

app_description = """
This app detects hate speech on Portuguese text using multiple models. You can either introduce your own sentences by filling in "Text" or click on one of the examples provided below.
"""

# Example rows shown below the interface (one inner list per input row).
app_examples = [
    ["As pessoas tem que perceber que ser 'panasca' não é deixar de ser homem, é deixar de ser humano hahaha"],
    ["Hoje tive uma conversa muito agradável com um colega meu"],
]

output_textbox_component_description = """
This box will display the hate speech detection results based on the average score of multiple models.
"""

output_json_component_description = { "breakdown": """
This box presents a detailed breakdown of the evaluation for each model.
"""}

# --- Model registry ---------------------------------------------------------

# Hugging Face model id -> display name shown in the dropdown.
user_friendly_name = {
    "knowhate/HateBERTimbau": "HateBERTimbau (Original)",
    "knowhate/HateBERTimbau-youtube": "HateBERTimbau (YouTube)",
    "knowhate/HateBERTimbau-twitter": "HateBERTimbau (Twitter)",
    "knowhate/HateBERTimbau-yt-tt": "HateBERTimbau (YouTube + Twitter)"
}

# Plain list of model ids, in registry order.
model_list = list(user_friendly_name)

# Display name -> model id (inverse of the registry above).
reverse_user_friendly_name = {label: model for model, label in user_friendly_name.items()}

# Display names, in registry order, for the dropdown choices.
user_friendly_name_list = list(user_friendly_name.values())

def predict(s1, chosen_model):
    """Classify *s1* for hate speech with the selected model.

    Parameters
    ----------
    s1 : str
        Input text to classify.
    chosen_model : str
        User-friendly model name picked in the dropdown (falls back to being
        treated as a raw model id if it is not a known display name).

    Returns
    -------
    tuple[dict, str]
        A ``{label: score}`` mapping for the ``gr.Label`` output and a
        Markdown summary for the ``gr.Markdown`` output.
    """
    # Bug fix: the dropdown supplies the user-friendly display name, which
    # must be mapped back to the Hugging Face model id before loading.
    full_chosen_model_name = reverse_user_friendly_name.get(chosen_model, chosen_model)

    # Initialize the pipeline with the chosen model.
    model_pipeline = pipeline("text-classification", model=full_chosen_model_name)

    # Perform inference; the pipeline returns a list with one dict like
    # {"label": ..., "score": ...} for a single input string.
    predicted_label = model_pipeline(s1)[0]

    # Bug fix: the original returned the undefined name `scores` (NameError)
    # and produced only one value while the Interface declares two outputs.
    scores = {predicted_label["label"]: predicted_label["score"]}
    breakdown = (
        f"**Model:** {chosen_model}\n\n"
        f"**Label:** {predicted_label['label']} "
        f"(score: {predicted_label['score']:.4f})"
    )
    return scores, breakdown

# Interface inputs: a free-text box (pre-filled with the first example) and a
# dropdown of user-friendly model names (first entry selected by default).
inputs = [
    gr.Textbox(label="Text", value=app_examples[0][0]),
    gr.Dropdown(label="Model", choices=user_friendly_name_list, value=user_friendly_name_list[0])
]

# Interface outputs: a Label component for the classification result and a
# Markdown component for additional details.
outputs = [
 gr.Label(label="Result"),
 gr.Markdown(),
]


# Build the Gradio interface around `predict` and launch it at import time.
gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=app_title, 
             description=app_description,
             examples=app_examples).launch()