demo
app.py CHANGED
@@ -5,9 +5,12 @@ import librosa
 import numpy as np
 import torch
 
-from transformers import
+from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor, SpeechT5HifiGan
 
-
+checkpoint = "microsoft/speecht5_tts"
+processor = SpeechT5Processor.from_pretrained(checkpoint)
+model = SpeechT5ForTextToSpeech.from_pretrained("techiaith/microsoft_speecht5_finetuned_bu_tts_cy_en")
+vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
 
 speaker_embeddings = {
     "GGP": "spkemb/speaker0.npy",
@@ -21,7 +24,8 @@ def predict(text, speaker):
         return (16000, np.zeros(0).astype(np.int16))
     speaker_embedding = np.load(speaker_embeddings[speaker[:3]])
     speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
-
+    inputs = processor(text=text, return_tensors="pt")
+    speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
     speech = (speech.numpy() * 32767).astype(np.int16)
     return (16000, speech)
 
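For readability, here is a minimal sketch of how the changed lines fit together once the diff is applied. The empty-text guard condition, the indentation, and the rest of the speaker_embeddings dictionary are not visible in the hunks above and are assumptions here; the processor, model, and vocoder calls follow the standard transformers SpeechT5 API.

```python
import numpy as np
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor, SpeechT5HifiGan

# Load the SpeechT5 processor, the fine-tuned TTS model, and the HiFi-GAN vocoder.
checkpoint = "microsoft/speecht5_tts"
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained("techiaith/microsoft_speecht5_finetuned_bu_tts_cy_en")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

speaker_embeddings = {
    "GGP": "spkemb/speaker0.npy",
    # further speakers are not shown in the diff
}

def predict(text, speaker):
    if len(text.strip()) == 0:  # assumed guard; the diff only shows its return line
        return (16000, np.zeros(0).astype(np.int16))
    # Look up the x-vector for the chosen speaker (first three characters of the
    # dropdown label) and add a batch dimension.
    speaker_embedding = np.load(speaker_embeddings[speaker[:3]])
    speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
    # Tokenize the text and synthesize a waveform with the fine-tuned model,
    # letting the HiFi-GAN vocoder convert the spectrogram to audio.
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
    # Convert the float waveform in [-1, 1] to 16-bit PCM at 16 kHz.
    speech = (speech.numpy() * 32767).astype(np.int16)
    return (16000, speech)
```

The (16000, int16 array) tuple is the format Gradio's Audio output expects, which is why the waveform is scaled to 16-bit PCM before being returned.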