LAP-DEV committed
Commit d4fb1d5 · verified · 1 Parent(s): 1036445

Delete modules/vad

modules/vad/__init__.py DELETED
File without changes
modules/vad/silero_vad.py DELETED
@@ -1,264 +0,0 @@
- # Adapted from https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/vad.py
-
- from faster_whisper.vad import VadOptions, get_vad_model
- import numpy as np
- from typing import BinaryIO, Union, List, Optional, Tuple
- import warnings
- import faster_whisper
- from faster_whisper.transcribe import SpeechTimestampsMap, Segment
- import gradio as gr
-
-
- class SileroVAD:
-     def __init__(self):
-         self.sampling_rate = 16000
-         self.window_size_samples = 512
-         self.model = None
-
-     def run(self,
-             audio: Union[str, BinaryIO, np.ndarray],
-             vad_parameters: VadOptions,
-             progress: gr.Progress = gr.Progress()
-             ) -> Tuple[np.ndarray, List[dict]]:
-         """
-         Run VAD
-
-         Parameters
-         ----------
-         audio: Union[str, BinaryIO, np.ndarray]
-             Audio path or file binary or Audio numpy array
-         vad_parameters:
-             Options for VAD processing.
-         progress: gr.Progress
-             Indicator to show progress directly in gradio.
-
-         Returns
-         ----------
-         np.ndarray
-             Pre-processed audio with VAD
-         List[dict]
-             Chunks of speeches to be used to restore the timestamps later
-         """
-
-         sampling_rate = self.sampling_rate
-
-         if not isinstance(audio, np.ndarray):
-             audio = faster_whisper.decode_audio(audio, sampling_rate=sampling_rate)
-
-         duration = audio.shape[0] / sampling_rate
-         duration_after_vad = duration
-
-         if vad_parameters is None:
-             vad_parameters = VadOptions()
-         elif isinstance(vad_parameters, dict):
-             vad_parameters = VadOptions(**vad_parameters)
-         speech_chunks = self.get_speech_timestamps(
-             audio=audio,
-             vad_options=vad_parameters,
-             progress=progress
-         )
-         audio = self.collect_chunks(audio, speech_chunks)
-         duration_after_vad = audio.shape[0] / sampling_rate
-
-         return audio, speech_chunks
-
-     def get_speech_timestamps(
-         self,
-         audio: np.ndarray,
-         vad_options: Optional[VadOptions] = None,
-         progress: gr.Progress = gr.Progress(),
-         **kwargs,
-     ) -> List[dict]:
-         """This method is used for splitting long audios into speech chunks using silero VAD.
-
-         Args:
-             audio: One dimensional float array.
-             vad_options: Options for VAD processing.
-             kwargs: VAD options passed as keyword arguments for backward compatibility.
-             progress: Gradio progress to indicate progress.
-
-         Returns:
-             List of dicts containing begin and end samples of each speech chunk.
-         """
-
-         if self.model is None:
-             self.update_model()
-
-         if vad_options is None:
-             vad_options = VadOptions(**kwargs)
-
-         threshold = vad_options.threshold
-         min_speech_duration_ms = vad_options.min_speech_duration_ms
-         max_speech_duration_s = vad_options.max_speech_duration_s
-         min_silence_duration_ms = vad_options.min_silence_duration_ms
-         window_size_samples = self.window_size_samples
-         speech_pad_ms = vad_options.speech_pad_ms
-         sampling_rate = 16000
-         min_speech_samples = sampling_rate * min_speech_duration_ms / 1000
-         speech_pad_samples = sampling_rate * speech_pad_ms / 1000
-         max_speech_samples = (
-             sampling_rate * max_speech_duration_s
-             - window_size_samples
-             - 2 * speech_pad_samples
-         )
-         min_silence_samples = sampling_rate * min_silence_duration_ms / 1000
-         min_silence_samples_at_max_speech = sampling_rate * 98 / 1000
-
-         audio_length_samples = len(audio)
-
-         state, context = self.model.get_initial_states(batch_size=1)
-
-         speech_probs = []
-         for current_start_sample in range(0, audio_length_samples, window_size_samples):
-             progress(current_start_sample / audio_length_samples, desc="Detecting speeches only using VAD...")
-
-             chunk = audio[current_start_sample: current_start_sample + window_size_samples]
-             if len(chunk) < window_size_samples:
-                 chunk = np.pad(chunk, (0, int(window_size_samples - len(chunk))))
-             speech_prob, state, context = self.model(chunk, state, context, sampling_rate)
-             speech_probs.append(speech_prob)
-
-         triggered = False
-         speeches = []
-         current_speech = {}
-         neg_threshold = threshold - 0.15
-
-         # to save potential segment end (and tolerate some silence)
-         temp_end = 0
-         # to save potential segment limits in case of maximum segment size reached
-         prev_end = next_start = 0
-
-         for i, speech_prob in enumerate(speech_probs):
-             if (speech_prob >= threshold) and temp_end:
-                 temp_end = 0
-                 if next_start < prev_end:
-                     next_start = window_size_samples * i
-
-             if (speech_prob >= threshold) and not triggered:
-                 triggered = True
-                 current_speech["start"] = window_size_samples * i
-                 continue
-
-             if (
-                 triggered
-                 and (window_size_samples * i) - current_speech["start"] > max_speech_samples
-             ):
-                 if prev_end:
-                     current_speech["end"] = prev_end
-                     speeches.append(current_speech)
-                     current_speech = {}
-                     # previously reached silence (< neg_thres) and is still not speech (< thres)
-                     if next_start < prev_end:
-                         triggered = False
-                     else:
-                         current_speech["start"] = next_start
-                     prev_end = next_start = temp_end = 0
-                 else:
-                     current_speech["end"] = window_size_samples * i
-                     speeches.append(current_speech)
-                     current_speech = {}
-                     prev_end = next_start = temp_end = 0
-                     triggered = False
-                     continue
-
-             if (speech_prob < neg_threshold) and triggered:
-                 if not temp_end:
-                     temp_end = window_size_samples * i
-                 # condition to avoid cutting in very short silence
-                 if (window_size_samples * i) - temp_end > min_silence_samples_at_max_speech:
-                     prev_end = temp_end
-                 if (window_size_samples * i) - temp_end < min_silence_samples:
-                     continue
-                 else:
-                     current_speech["end"] = temp_end
-                     if (
-                         current_speech["end"] - current_speech["start"]
-                     ) > min_speech_samples:
-                         speeches.append(current_speech)
-                     current_speech = {}
-                     prev_end = next_start = temp_end = 0
-                     triggered = False
-                     continue
-
-         if (
-             current_speech
-             and (audio_length_samples - current_speech["start"]) > min_speech_samples
-         ):
-             current_speech["end"] = audio_length_samples
-             speeches.append(current_speech)
-
-         for i, speech in enumerate(speeches):
-             if i == 0:
-                 speech["start"] = int(max(0, speech["start"] - speech_pad_samples))
-             if i != len(speeches) - 1:
-                 silence_duration = speeches[i + 1]["start"] - speech["end"]
-                 if silence_duration < 2 * speech_pad_samples:
-                     speech["end"] += int(silence_duration // 2)
-                     speeches[i + 1]["start"] = int(
-                         max(0, speeches[i + 1]["start"] - silence_duration // 2)
-                     )
-                 else:
-                     speech["end"] = int(
-                         min(audio_length_samples, speech["end"] + speech_pad_samples)
-                     )
-                     speeches[i + 1]["start"] = int(
-                         max(0, speeches[i + 1]["start"] - speech_pad_samples)
-                     )
-             else:
-                 speech["end"] = int(
-                     min(audio_length_samples, speech["end"] + speech_pad_samples)
-                 )
-
-         return speeches
-
-     def update_model(self):
-         self.model = get_vad_model()
-
-     @staticmethod
-     def collect_chunks(audio: np.ndarray, chunks: List[dict]) -> np.ndarray:
-         """Collects and concatenates audio chunks."""
-         if not chunks:
-             return np.array([], dtype=np.float32)
-
-         return np.concatenate([audio[chunk["start"]: chunk["end"]] for chunk in chunks])
-
-     @staticmethod
-     def format_timestamp(
-         seconds: float,
-         always_include_hours: bool = False,
-         decimal_marker: str = ".",
-     ) -> str:
-         assert seconds >= 0, "non-negative timestamp expected"
-         milliseconds = round(seconds * 1000.0)
-
-         hours = milliseconds // 3_600_000
-         milliseconds -= hours * 3_600_000
-
-         minutes = milliseconds // 60_000
-         milliseconds -= minutes * 60_000
-
-         seconds = milliseconds // 1_000
-         milliseconds -= seconds * 1_000
-
-         hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
-         return (
-             f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
-         )
-
-     def restore_speech_timestamps(
-         self,
-         segments: List[dict],
-         speech_chunks: List[dict],
-         sampling_rate: Optional[int] = None,
-     ) -> List[dict]:
-         if sampling_rate is None:
-             sampling_rate = self.sampling_rate
-
-         ts_map = SpeechTimestampsMap(speech_chunks, sampling_rate)
-
-         for segment in segments:
-             segment["start"] = ts_map.get_original_time(segment["start"])
-             segment["end"] = ts_map.get_original_time(segment["end"])
-
-         return segments
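
For context, a typical consumer of the deleted class would trim silence with run(), transcribe the shortened audio, and then map segment times back to the original recording with restore_speech_timestamps(). The sketch below is a hypothetical reconstruction of that flow, assuming the pre-deletion import path modules.vad.silero_vad; the WhisperModel call, the sample.wav file name, and the VadOptions values are illustrative assumptions for the example, not part of this commit.

# Hypothetical usage of the module removed by this commit.
from faster_whisper import WhisperModel
from faster_whisper.vad import VadOptions

from modules.vad.silero_vad import SileroVAD  # import path as it was before the deletion

vad = SileroVAD()

# Illustrative VAD settings; VadOptions() defaults would also work.
options = VadOptions(threshold=0.5, min_speech_duration_ms=250)

# run() decodes the input if it is not already a numpy array and returns
# the silence-trimmed audio plus the speech chunks (sample offsets).
trimmed_audio, speech_chunks = vad.run("sample.wav", vad_parameters=options)

# Transcribe only the speech portions (assumed faster-whisper usage).
model = WhisperModel("base")
segments, _ = model.transcribe(trimmed_audio)
results = [{"start": s.start, "end": s.end, "text": s.text} for s in segments]

# Map segment times from the trimmed audio back to the original timeline.
restored = vad.restore_speech_timestamps(results, speech_chunks)
for seg in restored:
    print(SileroVAD.format_timestamp(seg["start"]), seg["text"])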