import gradio as gr
import torch
from transformers import MarianMTModel, MarianTokenizer
# Load the MarianMT model and tokenizer
model_name = "Dddixyy/latin-italian-translator"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
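# The tokenizer and model are loaded once at startup and reused for every request (CPU by default).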
# Translation function
def translate_latin_to_italian(latin_text):
    # Truncate input to a maximum of 512 tokens so it fits the model's context window
    inputs = tokenizer(latin_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # Disable gradient tracking to speed up inference
    with torch.no_grad():
        # Pass input_ids and attention_mask together to generate
        generated_ids = model.generate(**inputs)
    # Decode the generated ids into a readable translation
    translation = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return translation[0]
# Define the Gradio interface
interface = gr.Interface(
    fn=translate_latin_to_italian,
    inputs="text",
    outputs="text",
    title="Latin to Italian Translator",
    description="Translate Latin sentences to Italian using a fine-tuned MarianMT model.",
    examples=[["Amor vincit omnia."], ["Veni, vidi, vici."], ["Carpe diem."], ["Alea iacta est."]]
)
# Launch the app
if __name__ == "__main__":
    interface.launch(server_name="0.0.0.0", server_port=7860)