yangwang825 committed on
Commit 62c1558 · verified · 1 Parent(s): 210822a

Upload feature extractor

feature_extraction_xvector.py ADDED
@@ -0,0 +1,130 @@
+ from typing import List, Optional, Union
+
+ import numpy as np
+ from rich import print
+
+ from transformers.feature_extraction_utils import BatchFeature
+ from transformers.utils import PaddingStrategy, TensorType
+ from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
+
+
+ class XVectorFeatureExtractor(SequenceFeatureExtractor):
+
+     model_input_names = ["input_values", "attention_mask"]
+
+     def __init__(
+         self,
+         feature_size=1,
+         sampling_rate=16000,
+         padding_value=0.0,
+         return_attention_mask=False,
+         do_normalize=True,
+         **kwargs,
+     ):
+         super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+         self.return_attention_mask = return_attention_mask
+         self.do_normalize = do_normalize
+
+     @staticmethod
+     def zero_mean_unit_var_norm(
+         input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
+     ) -> List[np.ndarray]:
+         """
+         Every array in the list is normalized to have zero mean and unit variance
+         """
+         if attention_mask is not None:
+             attention_mask = np.array(attention_mask, np.int32)
+             normed_input_values = []
+
+             for vector, length in zip(input_values, attention_mask.sum(-1)):
+                 normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
+                 if length < normed_slice.shape[0]:
+                     normed_slice[length:] = padding_value
+
+                 normed_input_values.append(normed_slice)
+         else:
+             normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
+
+         return normed_input_values
+
+     def __call__(
+         self,
+         raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+         padding: Union[bool, str, PaddingStrategy] = False,
+         max_length: Optional[int] = None,
+         truncation: bool = False,
+         pad_to_multiple_of: Optional[int] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         sampling_rate: Optional[int] = None,
+         **kwargs,
+     ) -> BatchFeature:
+         if sampling_rate is not None:
+             if sampling_rate != self.sampling_rate:
+                 raise ValueError(
+                     f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+                     f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+                     f" {self.sampling_rate} and not {sampling_rate}."
+                 )
+         else:
+             print(
+                 "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
+                 "Failing to do so can result in silent errors that might be hard to debug."
+             )
+
+         is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+         if is_batched_numpy and len(raw_speech.shape) > 2:
+             raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+         is_batched = is_batched_numpy or (
+             isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+         )
+
+         # always return batch
+         if not is_batched:
+             raw_speech = [raw_speech]
+
+         # convert into correct format for padding
+         encoded_inputs = BatchFeature({"input_values": raw_speech})
+
+         padded_inputs = self.pad(
+             encoded_inputs,
+             padding=padding,
+             max_length=max_length,
+             truncation=truncation,
+             pad_to_multiple_of=pad_to_multiple_of,
+             return_attention_mask=return_attention_mask,
+         )
+
+         # convert input values to correct format
+         input_values = padded_inputs["input_values"]
+         if not isinstance(input_values[0], np.ndarray):
+             padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
+         elif (
+             not isinstance(input_values, np.ndarray)
+             and isinstance(input_values[0], np.ndarray)
+             and input_values[0].dtype is np.dtype(np.float64)
+         ):
+             padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
+         elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
+             padded_inputs["input_values"] = input_values.astype(np.float32)
+
+         # convert attention_mask to correct format
+         attention_mask = padded_inputs.get("attention_mask")
+         if attention_mask is not None:
+             padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+         # zero-mean and unit-variance normalization
+         if self.do_normalize:
+             attention_mask = (
+                 attention_mask
+                 if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+                 else None
+             )
+             padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
+                 padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
+             )
+
+         if return_tensors is not None:
+             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+         return padded_inputs
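For orientation, here is a minimal usage sketch (not part of the commit) of the extractor defined above: it pads a small batch of variable-length mono clips, builds an attention mask, and applies the zero-mean/unit-variance normalization. The random waveforms and 16 kHz rate are illustrative only.

# Illustrative usage sketch; assumes feature_extraction_xvector.py (above) is importable.
import numpy as np
from feature_extraction_xvector import XVectorFeatureExtractor

extractor = XVectorFeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    do_normalize=True,
    return_attention_mask=True,
)

# Two mono clips of different lengths (placeholder random audio).
clips = [
    np.random.randn(16000).astype(np.float32),
    np.random.randn(12000).astype(np.float32),
]

inputs = extractor(
    clips,
    sampling_rate=16000,        # must match the extractor's sampling_rate
    padding=True,               # pad to the longest clip in the batch
    return_attention_mask=True, # mask out the padded samples during normalization
    return_tensors="np",
)

print(inputs["input_values"].shape)    # (2, 16000): both clips padded to the longest
print(inputs["attention_mask"].shape)  # (2, 16000): 1 for real samples, 0 for padding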
preprocessor_config.json CHANGED
@@ -1,4 +1,7 @@
  {
+   "auto_map": {
+     "AutoFeatureExtractor": "feature_extraction_xvector.XVectorFeatureExtractor"
+   },
    "do_normalize": true,
    "feature_extractor_type": "XVectorFeatureExtractor",
    "feature_size": 1,