import torch
from seamless_communication.inference import Translator
# Initialize a Translator object with the multitask model and vocoder on the GPU.
translator = Translator(
"seamlessM4T_v2_large", "vocoder_v2", torch.device("cuda:0"), torch.float16
)
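# Note: this assumes a CUDA device is available. On a CPU-only machine you would
# typically pass torch.device("cpu") and torch.float32 instead (an assumption,
# not part of the original file).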
def translate_text(text):
    # Translate English text to Spanish text and speech (T2ST task).
    # Passing None for the generation options falls back to the library defaults.
    text_output, speech_output = translator.predict(
        input=text,
        task_str="T2ST",
        tgt_lang="spa",
        src_lang="eng",
        text_generation_opts=None,
        unit_generation_opts=None,
    )
    print(text_output)
    return text_output, speech_output
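
# Illustrative usage sketch; the sample sentence below is an assumption and not
# part of the original file.
if __name__ == "__main__":
    translated_text, speech = translate_text("Hello, how are you?")
    # speech.audio_wavs[0][0] holds the synthesized waveform at speech.sample_rate.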