AlexK-PL committed on
Commit 20916b9 · 1 Parent(s): 876e967

Delete utils.py

Files changed (1)
  1. utils.py +0 -39
utils.py DELETED
@@ -1,39 +0,0 @@
- import numpy as np
- from scipy.io.wavfile import read
- import torch
-
-
- def get_mask_from_lengths(lengths):
-     max_len = torch.max(lengths).item()
-     ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
-     mask = (ids < lengths.unsqueeze(1)).byte()
-     # mask = (ids < lengths.unsqueeze(1).cuda()).cpu()
-     # mask = mask.byte()
-     return mask
-
-
- # probably I won't use it from here
- def load_wav_to_torch(full_path, sr):
-     sampling_rate, data = read(full_path)
-     assert sr == sampling_rate, "{} SR doesn't match {} on path {}".format(
-         sr, sampling_rate, full_path)
-     return torch.FloatTensor(data.astype(np.float32))
-
-
- # probably I won't use it from here
- def load_filepaths_and_text(filename, sort_by_length, split="|"):
-     with open(filename, encoding='utf-8') as f:
-         filepaths_and_text = [line.strip().split(split) for line in f]
-
-     if sort_by_length:
-         filepaths_and_text.sort(key=lambda x: len(x[1]))
-
-     return filepaths_and_text
-
-
- def to_gpu(x):
-     x = x.contiguous()
-
-     if torch.cuda.is_available():
-         x = x.cuda(non_blocking=True)  # I understand this lets asynchronous processing
-     return torch.autograd.Variable(x)
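
For reference only, not part of the commit: a minimal, device-agnostic sketch of the two CUDA-bound helpers in the deleted file, written against current PyTorch. The boolean mask (instead of `.byte()`) and dropping the deprecated `torch.autograd.Variable` wrapper are assumptions about how callers would be adapted, not something the commit itself states.

    import torch


    def get_mask_from_lengths(lengths):
        # Build the index tensor on the same device as `lengths` instead of
        # forcing a torch.cuda.LongTensor, so the helper also runs on CPU.
        max_len = torch.max(lengths).item()
        ids = torch.arange(0, max_len, device=lengths.device)
        # Broadcasting gives a (batch, max_len) boolean mask: True where the
        # position index is inside each sequence's length.
        return ids < lengths.unsqueeze(1)


    def to_gpu(x):
        # torch.autograd.Variable is deprecated; tensors track gradients directly.
        x = x.contiguous()
        if torch.cuda.is_available():
            # non_blocking=True overlaps the host-to-device copy with compute
            # when the source tensor lives in pinned memory.
            x = x.cuda(non_blocking=True)
        return x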