File size: 2,364 Bytes
f648ebc
 
 
 
46cb133
f648ebc
6743eeb
040f7cc
3c06843
851ef29
040f7cc
851ef29
ae062a4
040f7cc
ae062a4
a9b8087
 
82488ed
e9e9957
f2114ca
 
82488ed
 
a9b8087
f2114ca
82488ed
f648ebc
82488ed
f648ebc
82488ed
 
f648ebc
861614f
f648ebc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
from response_generation import ResponseGenerator
import gradio as gr

# Hugging Face model ID of the DialoGPT checkpoint fine-tuned on MultiCONAN.
DEFAULT_MODEL = "shaneweisz/DialoGPT-finetuned-multiCONAN"
# Decoding parameters passed to the response generator (beam search with
# an n-gram repetition block and bounded response length).
DECODING_CONFIG = {"max_new_tokens": 100, "min_new_tokens": 20, "no_repeat_ngram_size": 3, "num_beams": 10}

# Title shown at the top of the Gradio demo page.
TITLE = "Automating Counterspeech in Dialogue Systems"
# Markdown description rendered under the title (project background and usage hint).
DESCRIPTION = """
Built by [Shane Weisz](https://shaneweisz.com) for my research project on _Automating Counterspeech in Dialogue Systems_ as part of the [MPhil in Machine Learning and Machine Intelligence](https://www.mlmi.eng.cam.ac.uk/) at Cambridge University.
<br/>
The project is supervised by [Dr Marcus Tomalin](https://www.crassh.cam.ac.uk/about/people/marcus-tomalin/) and forms part of the [Giving Voice to Digital Democracies](https://www.crassh.cam.ac.uk/research/projects-centres/giving-voice-to-digital-democracies/) project on the _The Social Impact of Artificially Intelligent Communications Technology_.
<br/>
The system is built by fine-tuning [DialoGPT](https://huggingface.co/microsoft/DialoGPT-medium#:~:text=DialoGPT%20is%20a%20SOTA%20large,single%2Dturn%20conversation%20Turing%20test) on the [MultiCONAN](https://github.com/marcoguerini/CONAN#Multitarget-CONAN) dataset, a dataset comprising a set of hate speech inputs and appropriate [counterspeech](https://dangerousspeech.org/counterspeech/) responses produced under the supervision of trained NGO operators from [Stop Hate UK](https://www.stophateuk.org/).
<br/><br/>
**Try it out**: Enter a hate speech comment (or select one of the provided examples below), click Submit, and see if the system generates an appropriate counterspeech response.
"""

# Markdown footer rendered below the interface: disclaimer plus the model
# and decoding configuration actually in use (interpolated at import time).
ARTICLE = f"""
_Please note: This system is a prototype and cannot be guaranteed to always generate appropriate responses. Any inappropriate responses expressed by the system should not be construed as reflective of the views or values of the researchers._


**Model:** {DEFAULT_MODEL}<br>
**Decoding parameters:** {DECODING_CONFIG}
<br/><br/>

"""

model = ResponseGenerator(DEFAULT_MODEL, DECODING_CONFIG)

def respond(user_input: str) -> str:
    """Generate a counterspeech response to a hate speech comment.

    Thin wrapper around the module-level ``model`` so Gradio has a plain
    function to call. The parameter was renamed from ``input`` to avoid
    shadowing the builtin; Gradio invokes the function positionally, so
    callers are unaffected.
    """
    return model.respond(user_input)

# Example hate speech inputs offered as one-click demos in the UI.
example_inputs = [
    "Muslims are all terrorists",
    "Jews are selfish and greedy",
    "Why waste time listening to black women?",
]

# Build the single text-in / text-out demo interface and start the server.
demo = gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    examples=example_inputs,
    cache_examples=False,
    title=TITLE,
    description=DESCRIPTION,
    article=ARTICLE,
)
demo.launch()