Spaces:
Sleeping
Sleeping
vkovacs
committed on
Commit
·
c1f3033
1
Parent(s):
56b1f1f
64 tokens might be enough for sentences
Browse files
app.py
CHANGED
@@ -47,7 +47,7 @@ def predict(text, model_id, tokenizer_id):
|
|
47 |
model.to(device)
|
48 |
|
49 |
inputs = tokenizer(text,
|
50 |
-
max_length=
|
51 |
truncation=True,
|
52 |
padding="do_not_pad",
|
53 |
return_tensors="pt").to(device)
|
|
|
47 |
model.to(device)
|
48 |
|
49 |
inputs = tokenizer(text,
|
50 |
+
max_length=64,
|
51 |
truncation=True,
|
52 |
padding="do_not_pad",
|
53 |
return_tensors="pt").to(device)
|