Update app.py
app.py CHANGED
@@ -128,16 +128,14 @@ lang_id = {
 def translation_text(source_lang, target_lang, user_input):
     src_lang = lang_id[source_lang]
     trg_lang = lang_id[target_lang]
-    print(trg_lang)
     tokenizer.src_lang = src_lang
     with torch.no_grad():
+
         encoded_input = tokenizer(user_input, return_tensors="pt")
-
+
         generated_tokens = translation_model.generate(**encoded_input, forced_bos_token_id=tokenizer.get_lang_id(trg_lang))
-
-
-        generated_tokens, skip_special_tokens=True)[0]
-    return encoded_input
+        translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+        return translated_text
 
 def print_s(source_lang, target_lang, text0):
     print(source_lang)