import streamlit as st

from transformers import MBartForConditionalGeneration, MBart50TokenizerFast


def main():
    st.title("Translation App")

    # Load the English-to-many MBART-50 checkpoint and its tokenizer (English source)
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
    tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")

    # Input text area
    input_text = st.text_area("Enter text to translate", "")

    if st.button("Translate"):
        # Perform translation
        translated_text = translate_text(input_text, model, tokenizer)

        # Display translated text
        st.write("Translated Text:")
        st.write(translated_text)


def translate_text(input_text, model, tokenizer):
    # Tokenize the input text; returns a dict with input_ids and attention_mask
    model_inputs = tokenizer(input_text, return_tensors="pt")

    # Generate, forcing the decoder to start with the Hindi language token
    generated_tokens = model.generate(
        **model_inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"]
    )

    # Decode translated text
    translated_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
    return translated_text


if __name__ == '__main__':
    main()
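As written, the model and tokenizer are re-loaded on every Streamlit rerun (each button click). A minimal sketch of caching them, assuming Streamlit >= 1.18 (which provides st.cache_resource) and a hypothetical helper named load_model_and_tokenizer:

import streamlit as st
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

@st.cache_resource
def load_model_and_tokenizer():
    # Cached across reruns, so the checkpoint is downloaded and loaded only once per session
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
    tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
    return model, tokenizer

With this helper, the two from_pretrained calls in main() would be replaced by a single line, model, tokenizer = load_model_and_tokenizer(). Saved as app.py, the app runs locally with streamlit run app.py.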