Update app.py
app.py
CHANGED
@@ -238,7 +238,7 @@ addPreprocessing(target_tokenizer)
 
 token_config = {
     "add_special_tokens": True,
-    "return_tensors":
+    "return_tensors": 'pt',
 }
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -252,7 +252,7 @@ import gradio as gr
 iface = gr.Interface(
     fn=translate,
     inputs=[
-        gr.Textbox("
+        gr.Textbox(label="Input sentence"),
         gr.Radio(['greedy', 'beam search'], label="Decoding Strategy"),
         gr.Number(label="Length Extend (for greedy)"),
         gr.Number(label="Beam Size (for beam search)"),
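For context, the two fixed lines presumably plug into the rest of app.py roughly as in the sketch below. Only token_config, the device line, and the gr.Interface inputs come from this diff; the checkpoint name, the translate body, and the decoding parameters are assumptions for illustration. With "return_tensors": 'pt' the tokenizer returns PyTorch tensors that can be moved to device, and the labeled gr.Textbox becomes the first argument passed to translate.

# Minimal sketch, assuming a seq2seq translation model; names not shown in
# the diff (checkpoint, translate body, output component) are hypothetical.
import torch
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "Helsinki-NLP/opus-mt-en-de"  # assumed checkpoint, not from the diff
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

token_config = {
    "add_special_tokens": True,
    "return_tensors": 'pt',  # PyTorch tensors, so the batch can be moved to `device`
}

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

def translate(sentence, strategy, length_extend, beam_size):
    # Tokenize with the shared config and put the batch on the model's device.
    inputs = tokenizer(sentence, **token_config).to(device)
    if strategy == "beam search":
        output = model.generate(**inputs, num_beams=int(beam_size))
    else:  # greedy: allow the output to run `length_extend` tokens past the input
        max_len = inputs["input_ids"].shape[1] + int(length_extend)
        output = model.generate(**inputs, max_length=max_len)
    return tokenizer.decode(output[0], skip_special_tokens=True)

iface = gr.Interface(
    fn=translate,
    inputs=[
        gr.Textbox(label="Input sentence"),
        gr.Radio(['greedy', 'beam search'], label="Decoding Strategy"),
        gr.Number(label="Length Extend (for greedy)"),
        gr.Number(label="Beam Size (for beam search)"),
    ],
    outputs=gr.Textbox(label="Translation"),  # assumed output component
)
iface.launch()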