"""Gradio demo: translate English text with T5 via the transformers pipeline."""

import torch
import gradio as gr
from transformers import pipeline
# Human-readable task labels mapped to the transformers translation tasks
# that the T5 checkpoints support out of the box.
translation_task_names = {
    'English to French': 'translation_en_to_fr',
    'English to German': 'translation_en_to_de',
    'English to Romanian': 'translation_en_to_ro',
}
# Human-readable model labels mapped to Hugging Face Hub model identifiers.
model_names = {
    'T5-Base': 't5-base',
    'T5-Small': 't5-small',
    'T5-Large': 't5-large'
}

# Cache of already-built pipelines, keyed by (model_choice, task_choice, load_in_8bit),
# so switching settings back and forth does not reload the model every time.
loaded_models = {}
def translate_text(model_choice, task_choice, text_input, load_in_8bit, device):
    model_key = (model_choice, task_choice, load_in_8bit)

    if model_key in loaded_models:
        translator = loaded_models[model_key]
    else:
        # 8-bit loading goes through bitsandbytes and is only useful on a CUDA
        # device; half precision is used in that case to keep memory low.
        model_kwargs = {"load_in_8bit": load_in_8bit} if load_in_8bit else {}
        dtype = torch.float16 if load_in_8bit else torch.float32
        translator = pipeline(task=translation_task_names[task_choice],
                              model=model_names[model_choice],
                              device=device,
                              model_kwargs=model_kwargs,
                              torch_dtype=dtype,
                              use_fast=True)
        loaded_models[model_key] = translator

    translation = translator(text_input)[0]['translation_text']
    return translation.strip()
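
# Quick smoke test (illustrative only: the label strings below are keys from the
# dictionaries above, and 'Hello, world!' is an arbitrary example input):
#   translate_text('T5-Small', 'English to French', 'Hello, world!', False, 'cpu')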

def launch(model_choice, task_choice, text_input, load_in_8bit, device):
    return translate_text(model_choice, task_choice, text_input, load_in_8bit, device)


model_dropdown = gr.Dropdown(choices=list(model_names.keys()), label='Select Model')
task_dropdown = gr.Dropdown(choices=list(translation_task_names.keys()), label='Select Translation Task')
text_input = gr.Textbox(label="Input Text")
load_in_8bit = gr.Checkbox(label="Load model in 8bit")
device = gr.Radio(['cpu', 'cuda'], label='Select device', value='cpu')

iface = gr.Interface(launch,
                     inputs=[model_dropdown, task_dropdown, text_input, load_in_8bit, device],
                     outputs=gr.Textbox(type="text", label="Translation"))
iface.launch()
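
# Optional (assumption, not part of the original script): launch() also accepts
# flags such as share=True for a temporary public link, or server_name='0.0.0.0'
# to listen on all interfaces, if the demo needs to be reachable from other machines.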