Spaces:
Sleeping
Sleeping
File size: 3,206 Bytes
82c52c7 5cfd806 82c52c7 46318b8 5cfd806 6d8648d 504e122 3219277 82c52c7 0c69b28 82c52c7 ec49d0d 82c52c7 5cfd806 82c52c7 5ab728f 82c52c7 5cfd806 82c52c7 5cfd806 82c52c7 5cfd806 82c52c7 da4d792 82c52c7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import gradio as gr
import torch
from transformers import T5Tokenizer, MT5ForConditionalGeneration
# Select GPU when available; all tensors and the model are moved to this device.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Tokenizer and fine-tuned mT5 checkpoint for EN<->LT translation.
# NOTE: both are downloaded from the Hugging Face Hub at import time (network I/O).
TOKENIZER = T5Tokenizer.from_pretrained('werent4/mt5TranslatorLT')
MODEL = MT5ForConditionalGeneration.from_pretrained("werent4/mt5TranslatorLT")
MODEL.to(DEVICE)
def translate(text, translation_way, max_length, num_beams):
    """Translate text between English and Lithuanian with the fine-tuned mT5 model.

    Args:
        text: Source text to translate.
        translation_way: Direction key, either "en-lt" or "lt-en".
        max_length: Maximum length of the generated sequence. May arrive as a
            float from a Gradio slider; coerced to int before generation.
        num_beams: Beam-search width. Same float caveat as max_length.

    Returns:
        The translated string with special tokens removed.

    Raises:
        ValueError: If `translation_way` is not a supported direction.
    """
    translations_ways = {
        "en-lt": "<EN2LT>",
        "lt-en": "<LT2EN>",
    }
    if translation_way not in translations_ways:
        raise ValueError(f"Invalid translation way: {translation_way}. Supported ways: {list(translations_ways.keys())}")
    # The model was trained with a direction-control token prepended to the input.
    input_text = f"{translations_ways[translation_way]} {text}"
    encoded_input = TOKENIZER(input_text, return_tensors="pt", padding=True, truncation=True, max_length=128).to(DEVICE)
    with torch.no_grad():
        output_tokens = MODEL.generate(
            **encoded_input,
            # Gradio sliders can deliver floats; generate() requires integers
            # for max_length and num_beams.
            max_length=int(max_length),
            num_beams=int(num_beams),
            no_repeat_ngram_size=2,
            early_stopping=True,
        )
    return TOKENIZER.decode(output_tokens[0], skip_special_tokens=True)
# Build the Gradio UI: generation-parameter sliders, input/output text boxes,
# a direction dropdown, and a collapsible local-usage code snippet.
with gr.Blocks() as interface:
    # Close the <h1> tag (the original markdown left it open).
    gr.Markdown("<h1>Lt🔄En: Lithuanian to English and vice versa</h1>")
    with gr.Row():
        # step=1 keeps both parameters integral, as transformers' generate()
        # requires (the original num_beams slider used the invalid step=False).
        max_length = gr.Slider(1, 512, value=128, step=1, label="Max length", interactive=True)
        num_beams = gr.Slider(1, 16, value=5, step=1, label="Num beams", interactive=True)
    with gr.Row():
        input_text = gr.Textbox(label="Text input", placeholder="Enter your text here")
        with gr.Column():
            translation_way = gr.Dropdown(label="Mode", choices=['en-lt', 'lt-en'])
            translate_button = gr.Button("Translate")
    output_text = gr.Textbox(label="Translated text")
    with gr.Accordion("How to run the model locally:", open=False):
        gr.Code("""import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from transformers import T5Tokenizer, MT5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained('werent4/mt5TranslatorLT')
model = MT5ForConditionalGeneration.from_pretrained("werent4/mt5TranslatorLT")
model.to(device)
def translate(text, model, tokenizer, device, translation_way = "en-lt"):
    translations_ways = {
        "en-lt": "<EN2LT>",
        "lt-en": "<LT2EN>"
    }
    if translation_way not in translations_ways:
        raise ValueError(f"Invalid translation way. Supported ways: {list(translations_ways.keys())}")
    input_text = f"{translations_ways[translation_way]} {text}"
    encoded_input = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=128).to(device)
    with torch.no_grad():
        output_tokens = model.generate(
            **encoded_input,
            max_length=128,
            num_beams=5,
            no_repeat_ngram_size=2,
            early_stopping=True
        )
    translated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    return translated_text
text = "I live in Kaunas"
translate(text, model, tokenizer, device)
""", language='python')
    # Wire the button: slider values are forwarded as generation parameters.
    translate_button.click(fn=translate, inputs=[input_text, translation_way, max_length, num_beams], outputs=[output_text])
interface.launch(share=True)