consciousAI committed
Commit 1a48f25 · 1 Parent(s): 796ae95

Update app.py
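
The change reworks the 'Auto' path of _generate: the model == "All" branch still runs all five question-generation models, and a new elif chain lets a single model be run by name (question-generation-auto-t5-v1-base-s, -s-q, -s-q-c, and the two -hints- variants). It also corrects the _predictionM0 textbox label from question-generation-auto-t5-v1-base-s-q to question-generation-auto-t5-v1-base-s.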

Files changed (1):
  1. app.py +170 -85
app.py CHANGED
@@ -60,92 +60,177 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 
     if mode == 'Auto':
         _inputText = "question_context: " + context
-        if model == "All":
-
-            _encoding = _tk0.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
-            _outputEncoded = _m0.generate(_encoding,
-                                          min_length=minLength,
-                                          max_length=maxLength,
-                                          length_penalty=lengthPenalty,
-                                          early_stopping=earlyStopping,
-                                          num_return_sequences=numReturnSequences,
-                                          num_beams=numBeams,
-                                          no_repeat_ngram_size=noRepeatNGramSize,
-                                          do_sample=doSample,
-                                          top_k=topK,
-                                          penalty_alpha=penaltyAlpha,
-                                          top_p=topP,
-                                          temperature=temperature
-                                          )
-            predictionM0 = [_tk0.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
-
-            _encoding = _tk1.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
-            _outputEncoded = _m1.generate(_encoding,
-                                          min_length=minLength,
-                                          max_length=maxLength,
-                                          length_penalty=lengthPenalty,
-                                          early_stopping=earlyStopping,
-                                          num_return_sequences=numReturnSequences,
-                                          num_beams=numBeams,
-                                          no_repeat_ngram_size=noRepeatNGramSize,
-                                          do_sample=doSample,
-                                          top_k=topK,
-                                          penalty_alpha=penaltyAlpha,
-                                          top_p=topP,
-                                          temperature=temperature
-                                          )
-            predictionM1 = [_tk1.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
-
-            _encoding = _tk2.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
-            _outputEncoded = _m2.generate(_encoding,
-                                          min_length=minLength,
-                                          max_length=maxLength,
-                                          length_penalty=lengthPenalty,
-                                          early_stopping=earlyStopping,
-                                          num_return_sequences=numReturnSequences,
-                                          num_beams=numBeams,
-                                          no_repeat_ngram_size=noRepeatNGramSize,
-                                          do_sample=doSample,
-                                          top_k=topK,
-                                          penalty_alpha=penaltyAlpha,
-                                          top_p=topP,
-                                          temperature=temperature
-                                          )
-            predictionM2 = [_tk2.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
-
-            _encoding = _tk4.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
-            _outputEncoded = _m4.generate(_encoding,
-                                          min_length=minLength,
-                                          max_length=maxLength,
-                                          length_penalty=lengthPenalty,
-                                          early_stopping=earlyStopping,
-                                          num_return_sequences=numReturnSequences,
-                                          num_beams=numBeams,
-                                          no_repeat_ngram_size=noRepeatNGramSize,
-                                          do_sample=doSample,
-                                          top_k=topK,
-                                          penalty_alpha=penaltyAlpha,
-                                          top_p=topP,
-                                          temperature=temperature
-                                          )
-            predictionM4 = [_tk4.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
-
-            _encoding = _tk5.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
-            _outputEncoded = _m5.generate(_encoding,
-                                          min_length=minLength,
-                                          max_length=maxLength,
-                                          length_penalty=lengthPenalty,
-                                          early_stopping=earlyStopping,
-                                          num_return_sequences=numReturnSequences,
-                                          num_beams=numBeams,
-                                          no_repeat_ngram_size=noRepeatNGramSize,
-                                          do_sample=doSample,
-                                          top_k=topK,
-                                          penalty_alpha=penaltyAlpha,
-                                          top_p=topP,
-                                          temperature=temperature
-                                          )
-            predictionM5 = [_tk5.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        if model == "All":
+            _encoding = _tk0.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
+            _outputEncoded = _m0.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM0 = [_tk0.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+
+            _encoding = _tk1.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
+            _outputEncoded = _m1.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM1 = [_tk1.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+
+            _encoding = _tk2.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m2.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM2 = [_tk2.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+
+            _encoding = _tk4.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m4.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM4 = [_tk4.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+
+            _encoding = _tk5.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m5.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM5 = [_tk5.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        elif model == "question-generation-auto-hints-t5-v1-base-s-q-c":
+            _encoding = _tk5.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m5.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM5 = [_tk5.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        elif model == "question-generation-auto-hints-t5-v1-base-s-q":
+            _encoding = _tk4.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m4.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM4 = [_tk4.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        elif model == "question-generation-auto-t5-v1-base-s-q-c":
+            _encoding = _tk2.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024 .to(device)
+            _outputEncoded = _m2.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM2 = [_tk2.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        elif model == "question-generation-auto-t5-v1-base-s-q":
+            _encoding = _tk1.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
+            _outputEncoded = _m1.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM1 = [_tk1.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
+        elif model == "question-generation-auto-t5-v1-base-s":
+            _encoding = _tk0.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
+            _outputEncoded = _m0.generate(_encoding,
+                                          min_length=minLength,
+                                          max_length=maxLength,
+                                          length_penalty=lengthPenalty,
+                                          early_stopping=earlyStopping,
+                                          num_return_sequences=numReturnSequences,
+                                          num_beams=numBeams,
+                                          no_repeat_ngram_size=noRepeatNGramSize,
+                                          do_sample=doSample,
+                                          top_k=topK,
+                                          penalty_alpha=penaltyAlpha,
+                                          top_p=topP,
+                                          temperature=temperature
+                                          )
+            predictionM0 = [_tk0.decode(id, clean_up_tokenization_spaces=False, skip_special_tokens=True) for id in _outputEncoded]
     elif mode == 'Hints':
         _inputText = "question_hint: " + hint + "</s>question_context: " + context
 
@@ -232,7 +317,7 @@ with gr.Blocks() as demo:
     with gr.Row(variant='compact'):
         _predictionM2 = gr.Textbox(label="Predicted Questions - question-generation-auto-t5-v1-base-s-q-c [No Hints]")
         _predictionM1 = gr.Textbox(label="Predicted Questions - question-generation-auto-t5-v1-base-s-q [No Hints]")
-        _predictionM0 = gr.Textbox(label="Predicted Questions - question-generation-auto-t5-v1-base-s-q [No Hints]")
+        _predictionM0 = gr.Textbox(label="Predicted Questions - question-generation-auto-t5-v1-base-s [No Hints]")
 
     with gr.Row():
        gen_btn = gr.Button("Generate Questions")
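
The rewritten branch repeats the same encode, generate, decode sequence once per tokenizer/model pair. As a minimal refactoring sketch (not part of this commit), the duplication could be collapsed into a name-keyed lookup table. It assumes app.py's module-level _tkN/_mN tokenizer/model pairs and device; _run_model and _MODELS are hypothetical names introduced here for illustration:

# Hypothetical refactoring sketch; _run_model and _MODELS are not in the commit.
def _run_model(tokenizer, model_, input_text, **gen_kwargs):
    # Encode once, generate with the shared settings, decode every returned sequence.
    encoding = tokenizer.encode(input_text, return_tensors='pt',
                                truncation=True, padding='max_length').to(device)
    output = model_.generate(encoding, **gen_kwargs)
    return [tokenizer.decode(ids, clean_up_tokenization_spaces=False,
                             skip_special_tokens=True) for ids in output]

# Keyed on the same model names the UI exposes.
_MODELS = {
    "question-generation-auto-t5-v1-base-s":           (_tk0, _m0),
    "question-generation-auto-t5-v1-base-s-q":         (_tk1, _m1),
    "question-generation-auto-t5-v1-base-s-q-c":       (_tk2, _m2),
    "question-generation-auto-hints-t5-v1-base-s-q":   (_tk4, _m4),
    "question-generation-auto-hints-t5-v1-base-s-q-c": (_tk5, _m5),
}

# "All" runs every pair; otherwise only the named one, so both cases share one code path.
_pairs = _MODELS.values() if model == "All" else [_MODELS[model]]
_results = [_run_model(tk, m, _inputText,
                       min_length=minLength, max_length=maxLength,
                       length_penalty=lengthPenalty, early_stopping=earlyStopping,
                       num_return_sequences=numReturnSequences, num_beams=numBeams,
                       no_repeat_ngram_size=noRepeatNGramSize, do_sample=doSample,
                       top_k=topK, penalty_alpha=penaltyAlpha, top_p=topP,
                       temperature=temperature)
            for tk, m in _pairs]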