consciousAI committed on
Commit
e165485
·
1 Parent(s): 1a48f25

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -16,19 +16,19 @@ M5 = "consciousAI/question-generation-auto-hints-t5-v1-base-s-q-c"
16
  device = ['cuda' if torch.cuda.is_available() else 'cpu'][0]
17
 
18
  _m0 = AutoModelForSeq2SeqLM.from_pretrained(M0).to(device)
19
- _tk0 = AutoTokenizer.from_pretrained(M0, cache_dir="./cache")
20
 
21
  _m1 = AutoModelForSeq2SeqLM.from_pretrained(M1).to(device)
22
- _tk1 = AutoTokenizer.from_pretrained(M1, cache_dir="./cache")
23
 
24
  _m2 = AutoModelForSeq2SeqLM.from_pretrained(M2).to(device)
25
- _tk2 = AutoTokenizer.from_pretrained(M2, cache_dir="./cache")
26
 
27
  _m4 = AutoModelForSeq2SeqLM.from_pretrained(M4).to(device)
28
- _tk4 = AutoTokenizer.from_pretrained(M4, cache_dir="./cache")
29
 
30
  _m5 = AutoModelForSeq2SeqLM.from_pretrained(M5).to(device)
31
- _tk5 = AutoTokenizer.from_pretrained(M5, cache_dir="./cache")
32
 
33
  def _formatQs(questions):
34
  _finalQs = ""
@@ -298,7 +298,7 @@ with gr.Blocks() as demo:
298
  topP = gr.Slider(0, 1, 0, label="Top P/Nucleus Sampling")
299
  temperature = gr.Slider(0.01, 1, 1, label="Temperature")
300
  with gr.Row():
301
- model = gr.Dropdown(["question-generation-auto-hints-t5-v1-base-s-q-c", "question-generation-auto-hints-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s-q-c", "question-generation-auto-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s", "All"], label="Model")
302
 
303
 
304
  with gr.Accordion(variant='compact', label='Input Values'):
 
16
  device = ['cuda' if torch.cuda.is_available() else 'cpu'][0]
17
 
18
  _m0 = AutoModelForSeq2SeqLM.from_pretrained(M0).to(device)
19
+ _tk0 = AutoTokenizer.from_pretrained(M0)
20
 
21
  _m1 = AutoModelForSeq2SeqLM.from_pretrained(M1).to(device)
22
+ _tk1 = AutoTokenizer.from_pretrained(M1)
23
 
24
  _m2 = AutoModelForSeq2SeqLM.from_pretrained(M2).to(device)
25
+ _tk2 = AutoTokenizer.from_pretrained(M2)
26
 
27
  _m4 = AutoModelForSeq2SeqLM.from_pretrained(M4).to(device)
28
+ _tk4 = AutoTokenizer.from_pretrained(M4)
29
 
30
  _m5 = AutoModelForSeq2SeqLM.from_pretrained(M5).to(device)
31
+ _tk5 = AutoTokenizer.from_pretrained(M5)
32
 
33
  def _formatQs(questions):
34
  _finalQs = ""
 
298
  topP = gr.Slider(0, 1, 0, label="Top P/Nucleus Sampling")
299
  temperature = gr.Slider(0.01, 1, 1, label="Temperature")
300
  with gr.Row():
301
+ model = gr.Dropdown(["question-generation-auto-hints-t5-v1-base-s-q-c", "question-generation-auto-hints-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s-q-c", "question-generation-auto-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s", "All"], label="Model", value="question-generation-auto-hints-t5-v1-base-s-q-c")
302
 
303
 
304
  with gr.Accordion(variant='compact', label='Input Values'):