import gradio as gr
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

# Placeholders for the currently selected model and tokenizer (assigned per call in genQuestion)
model = None
tok = None

# Example inputs for each model
examples = [
    ["interview-question-remake", "I have a cat named dolche and he's not very friendly with strangers. I've had him for 9 years now and it has been a pleasure to see him grow closer to us every year."],
    ["interview-length-tagged","Today's weather was really nice."],
    ["reverse-interview-question", "There are so many incredible musicians out there and so many really compelling big hits this year that it makes for a really interesting way to recap some of those big events."]
]

# Descriptions for each model (currently unused)
# descriptions = "Interview question remake is a model that..."
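
# NOTE (optional sketch, not part of the original app): genQuestion() below
# reloads the chosen checkpoint from the Hub on every request. A simple
# in-process cache like this hypothetical helper would load each model at
# most once; genQuestion could then call load_model_cached(model_choice)
# in place of its if/elif branches.
_model_cache = {}

def load_model_cached(name):
    """Load and memoize a BART model + tokenizer for a hyechanjun/<name> repo."""
    if name not in _model_cache:
        _model_cache[name] = (
            BartForConditionalGeneration.from_pretrained(f"hyechanjun/{name}"),
            BartTokenizer.from_pretrained(f"hyechanjun/{name}"),
        )
    return _model_cache[name]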

# Generate interview questions given a model choice and an input context, both strings
def genQuestion(model_choice, context):
    # global descriptions
    if model_choice == "interview-question-remake":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/interview-question-remake")
        tok = BartTokenizer.from_pretrained("hyechanjun/interview-question-remake")
        # descriptions = "Interview question remake is a model that..."
    elif model_choice == "interview-length-tagged":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/interview-length-tagged")
        tok = BartTokenizer.from_pretrained("hyechanjun/interview-length-tagged")
        # descriptions = "Interview question tagged is a model that..."
    elif model_choice == "reverse-interview-question":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/reverse-interview-question")
        tok = BartTokenizer.from_pretrained("hyechanjun/reverse-interview-question")
        # descriptions = "Reverse interview question is a model that..."
    else:
        # Guard against unexpected choices (avoids an UnboundLocalError below)
        raise ValueError(f"Unknown model choice: {model_choice}")

    # Tokenize the context and generate 4 candidate questions via diverse beam
    # search (num_beams must be divisible by num_beam_groups)
    inputs = tok(context, return_tensors="pt")
    output = model.generate(
        inputs["input_ids"],
        num_beams=4,
        max_length=64,
        min_length=9,
        num_return_sequences=4,
        diversity_penalty=1.0,
        num_beam_groups=4,
    )

    # Decode each returned sequence once and join them, one question per line
    return "\n".join(
        tok.decode(seq, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for seq in output
    )
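
# Quick local smoke test (hypothetical, not in the original app):
# print(genQuestion("interview-question-remake", "I have a cat named dolche..."))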

iface = gr.Interface(
    fn=genQuestion,
    inputs=[
        gr.inputs.Dropdown(["interview-question-remake", "interview-length-tagged", "reverse-interview-question"]),
        "text",
    ],
    examples=examples,
    outputs="text",
)
iface.launch()