import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the fine-tuned Spanish-to-Nahuatl T5 model and its tokenizer from the Hugging Face Hub
model = AutoModelForSeq2SeqLM.from_pretrained('hackathon-pln-es/t5-small-spanish-nahuatl')
tokenizer = AutoTokenizer.from_pretrained('hackathon-pln-es/t5-small-spanish-nahuatl')

def predict(sentence):
  # Prepend the task prefix expected by the T5 model, then generate and decode the translation
  input_ids = tokenizer('translate Spanish to Nahuatl: ' + sentence, return_tensors='pt').input_ids
  outputs = model.generate(input_ids)
  outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
  return outputs
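# A minimal sketch of calling the model directly, independent of the Gradio UI
# (the exact output depends on the model weights and is not shown here):
#   predict('quiero comer')  # returns the model's Nahuatl translation as a string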

gr.Interface(
   fn=predict,
   inputs=gr.inputs.Textbox(lines=1, label="Input Text"),
   outputs=gr.outputs.Textbox(label="Nahuatl Translation"),
   theme="peach",
   title='🌽 Spanish to Nahuatl Automatic Translation',
   description='This model is a T5 Transformer (t5-small) fine-tuned on 29,007 Spanish and Nahuatl sentence pairs: 12,890 samples collected from the web and 16,117 samples from the Axolotl dataset. The dataset is normalized using the "sep" normalization from py-elotl. For more details visit https://huggingface.co/hackathon-pln-es/t5-small-spanish-nahuatl',
   examples=[
     'hola',
     'conejo',
     'estrella',
     'te quiero mucho',
     'te amo',
     'quiero comer',
     'esto se llama agua',
     'te amo con todo mi corazón'],
   allow_flagging="manual",
   flagging_options=["right translation", "wrong translation", "error", "other"]
   ).launch(enable_queue=True)