import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # make the repo root importable
import numpy as np
import argparse
import torch
import librosa

from hubert import hubert_model

def load_audio(file: str, sr: int = 16000):
    # Load the waveform resampled to 16 kHz mono, as expected by HuBERT.
    x, sr = librosa.load(file, sr=sr)
    return x

def load_model(path, device):
    # Build the soft-HuBERT content encoder from the pretrained checkpoint.
    model = hubert_model.hubert_soft(path)
    model.eval()
    if device != "cpu":
        model.half()  # fp16 inference on GPU
    model.to(device)
    return model

def pred_vec(model, wavPath, vecPath, device):
    audio = load_audio(wavPath)
    audln = audio.shape[0]
    vec_a = []
    idx_s = 0
    # Process the waveform in 20-second chunks (20 * 16000 samples).
    while idx_s + 20 * 16000 < audln:
        feats = audio[idx_s:idx_s + 20 * 16000]
        feats = torch.from_numpy(feats).to(device)
        feats = feats[None, None, :]
        if device != "cpu":
            feats = feats.half()
        with torch.no_grad():
            vec = model.units(feats).squeeze().data.cpu().float().numpy()
            vec_a.extend(vec)
        idx_s = idx_s + 20 * 16000
    # Handle the remaining tail shorter than 20 seconds.
    if idx_s < audln:
        feats = audio[idx_s:audln]
        feats = torch.from_numpy(feats).to(device)
        feats = feats[None, None, :]
        if device != "cpu":
            feats = feats.half()
        with torch.no_grad():
            vec = model.units(feats).squeeze().data.cpu().float().numpy()
            # print(vec.shape)   # [length, dim=256] hop=320
            vec_a.extend(vec)
    np.save(vecPath, vec_a, allow_pickle=False)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--wav", help="wav", dest="wav")
    parser.add_argument("-v", "--vec", help="vec", dest="vec")
    args = parser.parse_args()
    print(args.wav)
    print(args.vec)

    wavPath = args.wav
    vecPath = args.vec

    device = "cuda" if torch.cuda.is_available() else "cpu"
    hubert = load_model(os.path.join(
        "hubert_pretrain", "hubert-soft-0d54a1f4.pt"), device)
    pred_vec(hubert, wavPath, vecPath, device)
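
# Usage sketch (assumed invocation; adjust the script path to your checkout):
#   python inference.py -w input.wav -v output_vec
# This loads hubert_pretrain/hubert-soft-0d54a1f4.pt, extracts soft-HuBERT unit
# vectors from input.wav, and writes them with np.save (which appends ".npy"
# when the target path has no .npy extension).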