|
|
|
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
|
import fasttext
|
|
from huggingface_hub import hf_hub_download
|
|
|
|
|
|
class TranslateFromAny2XModel:
    """Translate text from any (auto-detected) language into a fixed target language.

    A FastText NLLB language-identification model detects the source language
    of the input; an NLLB seq2seq model then performs the translation into
    ``target_language``.
    """

    def __init__(self, nllb_model_path: str, fasttext_model_path: str, target_language: str = "eng_Latn"):
        """Initialize the model with paths for NLLB and FastText NLLB LID models.

        Args:
            nllb_model_path: Path or Hugging Face hub id of the NLLB translation model.
            fasttext_model_path: Path to the FastText NLLB language-ID model file.
            target_language: FLORES-200-style NLLB language code (e.g. "eng_Latn")
                to translate into.
        """
        self.nllb_model_path = nllb_model_path
        self.fasttext_model_path = fasttext_model_path
        self.target_language = target_language

        self.model = AutoModelForSeq2SeqLM.from_pretrained(nllb_model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(nllb_model_path)

        self.fasttext_model = fasttext.load_model(fasttext_model_path)

    def generate(self, prompt: str) -> str:
        """Translate *prompt* into ``self.target_language``.

        The source language is auto-detected with the FastText LID model and
        set as the tokenizer's ``src_lang``; the target language is forced via
        ``forced_bos_token_id`` on NLLB generation.

        Args:
            prompt: Text to translate (any language the LID model supports).

        Returns:
            The translated text, with special tokens stripped.
        """
        # fastText's predict() raises ValueError on inputs containing newlines
        # (it processes exactly one line), so flatten the prompt for LID only.
        # The original prompt is still what gets translated.
        lid_input = prompt.replace("\n", " ")
        # predict() returns (labels, probs); labels look like "__label__eng_Latn".
        detected_language = self.fasttext_model.predict(lid_input)[0][0].replace("__label__", "")
        self.tokenizer.src_lang = detected_language

        inputs = self.tokenizer(prompt, return_tensors="pt")
        output_tokens = self.model.generate(
            **inputs,
            forced_bos_token_id=self.tokenizer.convert_tokens_to_ids(self.target_language),
        )[0]
        return self.tokenizer.decode(output_tokens, skip_special_tokens=True)
|
|
|