File size: 1,871 Bytes
7c96d4d 18dbafb 7c96d4d 1ae55fd e6e654d 48dce21 e6e654d 1ae55fd 18dbafb b6287db 18dbafb 956e550 2c5321c 26b6523 74484cc 1271ce0 74b256d 1271ce0 1ae55fd 792cfa4 3c0fbbd 7acb40d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
import gradio as gr
from transformers import AutoModelForSeq2SeqLM
from transformers import AutoTokenizer
# Markdown credits rendered below the demo UI (passed to gr.Interface via article=).
article='''
# Team members
- Emilio Alejandro Morales [(milmor)](https://huggingface.co./milmor)
- Rodrigo Martínez Arzate [(rockdrigoma)](https://huggingface.co./rockdrigoma)
- Luis Armando Mercado [(luisarmando)](https://huggingface.co./luisarmando)
- Jacobo del Valle [(jjdv)](https://huggingface.co./jjdv)
'''
# Load the fine-tuned T5-small Spanish→Nahuatl checkpoint and its tokenizer
# from the Hugging Face Hub. Runs once at import time; first run downloads weights.
model = AutoModelForSeq2SeqLM.from_pretrained('hackathon-pln-es/t5-small-spanish-nahuatl')
tokenizer = AutoTokenizer.from_pretrained('hackathon-pln-es/t5-small-spanish-nahuatl')
def predict(input):
    """Translate a Spanish sentence into Nahuatl.

    Prepends the T5 task prefix, runs greedy generation on the
    module-level model, and decodes the first (only) hypothesis.

    Args:
        input: Spanish source text.

    Returns:
        The Nahuatl translation as a plain string.
    """
    prompt = 'translate Spanish to Nahuatl: ' + input
    encoded = tokenizer(prompt, return_tensors='pt')
    generated = model.generate(encoded.input_ids)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
# Build and launch the web demo. Uses the legacy gr.inputs/gr.outputs
# component API (Gradio 2.x); launch() blocks and serves the app.
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Textbox(lines=1, label="Input Text in Spanish"),
    outputs=[
        gr.outputs.Textbox(label="Translated text in Nahuatl"),
    ],
    theme="peach",
    title='🌽 Spanish to Nahuatl Automatic Translation',
    description='This model is a T5 Transformer (t5-small) fine-tuned on 29,007 spanish and nahuatl sentences using 12,890 samples collected from the web and 16,117 samples from the Axolotl dataset. The dataset is normalized using "sep" normalization from py-elotl. For more details visit https://huggingface.co./hackathon-pln-es/t5-small-spanish-nahuatl',
    # Clickable one-click example inputs shown under the textbox.
    examples=[
        'hola',
        'conejo',
        'estrella',
        'te quiero mucho',
        'te amo',
        'quiero comer',
        'esto se llama agua',
        'mi abuelo se llama Juan',
        'te amo con todo mi corazón'],
    article=article,
    # Manual flagging: users label a translation; records go to flagging_dir.
    allow_flagging="manual",
    flagging_options=["right translation", "wrong translation", "error", "other"],
    flagging_dir="logs"
).launch(enable_queue=True, debug=True)
|