import torch

from seamless_communication.inference import Translator

# Module-level side effect: constructing the Translator loads the multitask
# model and vocoder onto the first CUDA device in fp16. This requires a GPU
# with enough memory — NOTE(review): no CPU fallback is provided here.
translator = Translator(
    "seamlessM4T_v2_large",  # multitask (text + speech) model name
    "vocoder_v2",            # vocoder used for speech synthesis
    torch.device("cuda:0"),
    torch.float16,
)
def translate_text(text, tgt_lang="spa", src_lang="eng"):
    """Translate *text* to speech and text in the target language.

    Runs the module-level ``translator`` with the "T2ST" (text-to-speech
    translation) task, printing and returning the translated text alongside
    the generated speech units.

    Args:
        text: Source-language input string.
        tgt_lang: Target language code (default "spa", as in the original
            commented-out implementation).
        src_lang: Source language code (default "eng").

    Returns:
        Tuple ``(text_output, speech_output)`` as produced by
        ``Translator.predict`` — NOTE(review): exact types are defined by
        seamless_communication; confirm against its API docs.
    """
    # Restored from the previously commented-out implementation; the stub
    # print("test") placeholder has been removed. Generation options are
    # left at the library defaults (the old code passed None explicitly).
    text_output, speech_output = translator.predict(
        input=text,
        task_str="T2ST",
        tgt_lang=tgt_lang,
        src_lang=src_lang,
    )
    print(text_output)
    return text_output, speech_output