Upload whisper_parameter.py
modules/whisper/whisper_parameter.py
ADDED
@@ -0,0 +1,369 @@
from dataclasses import dataclass, fields
import gradio as gr
from typing import Optional, Dict
import yaml


@dataclass
class WhisperParameters:
    model_size: gr.Dropdown
    lang: gr.Dropdown
    is_translate: gr.Checkbox
    beam_size: gr.Number
    log_prob_threshold: gr.Number
    no_speech_threshold: gr.Number
    compute_type: gr.Dropdown
    best_of: gr.Number
    patience: gr.Number
    condition_on_previous_text: gr.Checkbox
    prompt_reset_on_temperature: gr.Slider
    initial_prompt: gr.Textbox
    temperature: gr.Slider
    compression_ratio_threshold: gr.Number
    vad_filter: gr.Checkbox
    threshold: gr.Slider
    min_speech_duration_ms: gr.Number
    max_speech_duration_s: gr.Number
    min_silence_duration_ms: gr.Number
    speech_pad_ms: gr.Number
    batch_size: gr.Number
    is_diarize: gr.Checkbox
    hf_token: gr.Textbox
    diarization_device: gr.Dropdown
    length_penalty: gr.Number
    repetition_penalty: gr.Number
    no_repeat_ngram_size: gr.Number
    prefix: gr.Textbox
    suppress_blank: gr.Checkbox
    suppress_tokens: gr.Textbox
    max_initial_timestamp: gr.Number
    word_timestamps: gr.Checkbox
    prepend_punctuations: gr.Textbox
    append_punctuations: gr.Textbox
    max_new_tokens: gr.Number
    chunk_length: gr.Number
    hallucination_silence_threshold: gr.Number
    hotwords: gr.Textbox
    language_detection_threshold: gr.Number
    language_detection_segments: gr.Number
    is_bgm_separate: gr.Checkbox
    uvr_model_size: gr.Dropdown
    uvr_device: gr.Dropdown
    uvr_segment_size: gr.Number
    uvr_save_file: gr.Checkbox
    uvr_enable_offload: gr.Checkbox
    """
    A data class for the Gradio components of the Whisper parameters. Used "before" Gradio pre-processing.
    This data class is used to mitigate the key-value problem between Gradio components and function parameters.
    Related Gradio issue: https://github.com/gradio-app/gradio/issues/2471
    See more about Gradio pre-processing: https://www.gradio.app/docs/components

    Attributes
    ----------
    model_size: gr.Dropdown
        Whisper model size.

    lang: gr.Dropdown
        Source language of the file to transcribe.

    is_translate: gr.Checkbox
        Boolean value that determines whether to translate to English.
        It is Whisper's feature to translate speech from another language directly into English end-to-end.

    beam_size: gr.Number
        Integer value used as the beam size for decoding.

    log_prob_threshold: gr.Number
        If the average log probability over sampled tokens is below this value, treat as failed.

    no_speech_threshold: gr.Number
        If the no_speech probability is higher than this value AND
        the average log probability over sampled tokens is below `log_prob_threshold`,
        consider the segment as silent.

    compute_type: gr.Dropdown
        Compute type for transcription.
        See more info: https://opennmt.net/CTranslate2/quantization.html

    best_of: gr.Number
        Number of candidates when sampling with non-zero temperature.

    patience: gr.Number
        Beam search patience factor.

    condition_on_previous_text: gr.Checkbox
        If True, the previous output of the model is provided as a prompt for the next window;
        disabling may make the text inconsistent across windows, but the model becomes less prone to
        getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.

    prompt_reset_on_temperature: gr.Slider
        Resets the prompt if the sampling temperature is above this value.
        Has an effect only if condition_on_previous_text is True.

    initial_prompt: gr.Textbox
        Optional text to provide as a prompt for the first window. This can be used to provide, or
        "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
        to make it more likely to predict those words correctly.

    temperature: gr.Slider
        Temperature for sampling. It can be a tuple of temperatures,
        which will be successively used upon failures according to either
        `compression_ratio_threshold` or `log_prob_threshold`.

    compression_ratio_threshold: gr.Number
        If the gzip compression ratio is above this value, treat as failed.

    vad_filter: gr.Checkbox
        Enable the voice activity detection (VAD) to filter out parts of the audio
        without speech. This step uses the Silero VAD model:
        https://github.com/snakers4/silero-vad.

    threshold: gr.Slider
        This parameter is related to Silero VAD. Speech threshold.
        Silero VAD outputs speech probabilities for each audio chunk;
        probabilities ABOVE this value are considered as SPEECH. It is better to tune this
        parameter for each dataset separately, but a "lazy" 0.5 is pretty good for most datasets.

    min_speech_duration_ms: gr.Number
        This parameter is related to Silero VAD. Final speech chunks shorter than min_speech_duration_ms are thrown out.

    max_speech_duration_s: gr.Number
        This parameter is related to Silero VAD. Maximum duration of speech chunks in seconds. Chunks longer
        than max_speech_duration_s will be split at the timestamp of the last silence that
        lasts more than 100 ms (if any), to prevent aggressive cutting. Otherwise, they will be
        split aggressively just before max_speech_duration_s.

    min_silence_duration_ms: gr.Number
        This parameter is related to Silero VAD. At the end of each speech chunk, wait for min_silence_duration_ms
        before separating it.

    speech_pad_ms: gr.Number
        This parameter is related to Silero VAD. Final speech chunks are padded by speech_pad_ms on each side.

    batch_size: gr.Number
        This parameter is related to the insanely-fast-whisper pipeline. Batch size to pass to the pipeline.

    is_diarize: gr.Checkbox
        This parameter is related to whisperx. Boolean value that determines whether to diarize or not.

    hf_token: gr.Textbox
        This parameter is related to whisperx. A Hugging Face token is needed to download diarization models.
        Read more at: https://huggingface.co/pyannote/speaker-diarization-3.1#requirements

    diarization_device: gr.Dropdown
        This parameter is related to whisperx. Device to run the diarization model on.

    length_penalty: gr.Number
        This parameter is related to faster-whisper. Exponential length penalty constant.

    repetition_penalty: gr.Number
        This parameter is related to faster-whisper. Penalty applied to the score of previously generated tokens
        (set > 1 to penalize).

    no_repeat_ngram_size: gr.Number
        This parameter is related to faster-whisper. Prevent repetitions of n-grams with this size (set 0 to disable).

    prefix: gr.Textbox
        This parameter is related to faster-whisper. Optional text to provide as a prefix for the first window.

    suppress_blank: gr.Checkbox
        This parameter is related to faster-whisper. Suppress blank outputs at the beginning of the sampling.

    suppress_tokens: gr.Textbox
        This parameter is related to faster-whisper. List of token IDs to suppress. -1 will suppress a default set
        of symbols as defined in the model config.json file.

    max_initial_timestamp: gr.Number
        This parameter is related to faster-whisper. The initial timestamp cannot be later than this.

    word_timestamps: gr.Checkbox
        This parameter is related to faster-whisper. Extract word-level timestamps using the cross-attention pattern
        and dynamic time warping, and include the timestamps for each word in each segment.

    prepend_punctuations: gr.Textbox
        This parameter is related to faster-whisper. If word_timestamps is True, merge these punctuation symbols
        with the next word.

    append_punctuations: gr.Textbox
        This parameter is related to faster-whisper. If word_timestamps is True, merge these punctuation symbols
        with the previous word.

    max_new_tokens: gr.Number
        This parameter is related to faster-whisper. Maximum number of new tokens to generate per chunk. If not set,
        the maximum will be set by the default max_length.

    chunk_length: gr.Number
        This parameter is related to faster-whisper and insanely-fast-whisper. The length of audio segments in seconds.
        If it is not None, it will overwrite the default chunk_length of the FeatureExtractor.

    hallucination_silence_threshold: gr.Number
        This parameter is related to faster-whisper. When word_timestamps is True, skip silent periods longer than this threshold
        (in seconds) when a possible hallucination is detected.

    hotwords: gr.Textbox
        This parameter is related to faster-whisper. Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.

    language_detection_threshold: gr.Number
        This parameter is related to faster-whisper. If the maximum probability of the language tokens is higher than this value, the language is detected.

    language_detection_segments: gr.Number
        This parameter is related to faster-whisper. Number of segments to consider for language detection.

    is_bgm_separate: gr.Checkbox
        This parameter is related to UVR. Boolean value that determines whether to separate background music (BGM) or not.

    uvr_model_size: gr.Dropdown
        This parameter is related to UVR. UVR model size.

    uvr_device: gr.Dropdown
        This parameter is related to UVR. Device to run the UVR model on.

    uvr_segment_size: gr.Number
        This parameter is related to UVR. Segment size for the UVR model.

    uvr_save_file: gr.Checkbox
        This parameter is related to UVR. Boolean value that determines whether to save the separated file or not.

    uvr_enable_offload: gr.Checkbox
        This parameter is related to UVR. Boolean value that determines whether to offload the UVR model or not
        after each transcription.
    """

    def as_list(self) -> list:
        """
        Converts the data class attributes into a list. Use this in the Gradio UI before Gradio pre-processing.
        See more about Gradio pre-processing: https://www.gradio.app/docs/components

        Returns
        ----------
        A list of Gradio components
        """
        return [getattr(self, f.name) for f in fields(self)]

    @staticmethod
    def as_value(*args) -> 'WhisperValues':
        """
        To use the Whisper parameters in a function after Gradio post-processing.
        See more about Gradio post-processing: https://www.gradio.app/docs/components

        Returns
        ----------
        WhisperValues
            Data class that has the values of the parameters
        """
        return WhisperValues(*args)

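The sketch below illustrates the intended round trip; it is a separate usage example, not part of this module. The components are passed to a Gradio event via as_list(), and the positional values Gradio hands to the callback are rebuilt into a typed object with as_value(). To keep it short, every field is filled with a generic gr.Textbox instead of the app's real typed components (so the values arrive as strings), and the callback name transcribe is an assumption for the example.

# Usage sketch (illustrative, not part of this module).
from dataclasses import fields

import gradio as gr

from modules.whisper.whisper_parameter import WhisperParameters

# One placeholder component per dataclass field, keyed by field name.
components = {f.name: gr.Textbox(label=f.name) for f in fields(WhisperParameters)}
whisper_params = WhisperParameters(**components)


def transcribe(audio_path, *args):
    # Gradio passes the component values positionally, in as_list() order,
    # so they can be rebuilt into a WhisperValues instance here.
    params = WhisperParameters.as_value(*args)
    return f"model={params.model_size}, beam_size={params.beam_size}"


with gr.Blocks() as demo:
    audio = gr.Textbox(label="Audio path")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(
        fn=transcribe,
        inputs=[audio] + whisper_params.as_list(),
        outputs=result,
    )

Because as_list() and WhisperValues share the same field order, the positional unpacking stays consistent as long as the two data classes are kept in sync.
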
@dataclass
class WhisperValues:
    model_size: str = "large-v2"
    lang: Optional[str] = None
    is_translate: bool = False
    beam_size: int = 5
    log_prob_threshold: float = -1.0
    no_speech_threshold: float = 0.6
    compute_type: str = "float16"
    best_of: int = 5
    patience: float = 1.0
    condition_on_previous_text: bool = True
    prompt_reset_on_temperature: float = 0.5
    initial_prompt: Optional[str] = None
    temperature: float = 0.0
    compression_ratio_threshold: float = 2.4
    vad_filter: bool = False
    threshold: float = 0.5
    min_speech_duration_ms: int = 250
    max_speech_duration_s: float = float("inf")
    min_silence_duration_ms: int = 2000
    speech_pad_ms: int = 400
    batch_size: int = 24
    is_diarize: bool = False
    hf_token: str = ""
    diarization_device: str = "cuda"
    length_penalty: float = 1.0
    repetition_penalty: float = 1.0
    no_repeat_ngram_size: int = 0
    prefix: Optional[str] = None
    suppress_blank: bool = True
    suppress_tokens: Optional[str] = "[-1]"
    max_initial_timestamp: float = 0.0
    word_timestamps: bool = False
    prepend_punctuations: Optional[str] = "\"'“¿([{-"
    append_punctuations: Optional[str] = "\"'.。,,!!??::”)]}、"
    max_new_tokens: Optional[int] = None
    chunk_length: Optional[int] = 30
    hallucination_silence_threshold: Optional[float] = None
    hotwords: Optional[str] = None
    language_detection_threshold: Optional[float] = None
    language_detection_segments: int = 1
    is_bgm_separate: bool = False
    uvr_model_size: str = "UVR-MDX-NET-Inst_HQ_4"
    uvr_device: str = "cuda"
    uvr_segment_size: int = 256
    uvr_save_file: bool = False
    uvr_enable_offload: bool = True
    """
    A data class that holds the actual values of the Whisper parameters.
    """

    def to_yaml(self) -> Dict:
        data = {
            "whisper": {
                "model_size": self.model_size,
                "lang": "Automatic Detection" if self.lang is None else self.lang,
                "is_translate": self.is_translate,
                "beam_size": self.beam_size,
                "log_prob_threshold": self.log_prob_threshold,
                "no_speech_threshold": self.no_speech_threshold,
                "best_of": self.best_of,
                "patience": self.patience,
                "condition_on_previous_text": self.condition_on_previous_text,
                "prompt_reset_on_temperature": self.prompt_reset_on_temperature,
                "initial_prompt": None if not self.initial_prompt else self.initial_prompt,
                "temperature": self.temperature,
                "compression_ratio_threshold": self.compression_ratio_threshold,
                "batch_size": self.batch_size,
                "length_penalty": self.length_penalty,
                "repetition_penalty": self.repetition_penalty,
                "no_repeat_ngram_size": self.no_repeat_ngram_size,
                "prefix": None if not self.prefix else self.prefix,
                "suppress_blank": self.suppress_blank,
                "suppress_tokens": self.suppress_tokens,
                "max_initial_timestamp": self.max_initial_timestamp,
                "word_timestamps": self.word_timestamps,
                "prepend_punctuations": self.prepend_punctuations,
                "append_punctuations": self.append_punctuations,
                "max_new_tokens": self.max_new_tokens,
                "chunk_length": self.chunk_length,
                "hallucination_silence_threshold": self.hallucination_silence_threshold,
                "hotwords": None if not self.hotwords else self.hotwords,
                "language_detection_threshold": self.language_detection_threshold,
                "language_detection_segments": self.language_detection_segments,
            },
            "vad": {
                "vad_filter": self.vad_filter,
                "threshold": self.threshold,
                "min_speech_duration_ms": self.min_speech_duration_ms,
                "max_speech_duration_s": self.max_speech_duration_s,
                "min_silence_duration_ms": self.min_silence_duration_ms,
                "speech_pad_ms": self.speech_pad_ms,
            },
            "diarization": {
                "is_diarize": self.is_diarize,
                "hf_token": self.hf_token
            },
            "bgm_separation": {
                "is_separate_bgm": self.is_bgm_separate,
                "model_size": self.uvr_model_size,
                "segment_size": self.uvr_segment_size,
                "save_file": self.uvr_save_file,
                "enable_offload": self.uvr_enable_offload
            },
        }
        return data

    def as_list(self) -> list:
        """
        Converts the data class attributes into a list.

        Returns
        ----------
        A list of Whisper parameters
        """
        return [getattr(self, f.name) for f in fields(self)]
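
to_yaml() only builds the nested dictionary; writing it to disk is left to the caller. Below is a minimal sketch of serializing the default values with the yaml package that the module imports (the output path default_parameters.yaml is an assumption for the example).

# Serialization sketch (illustrative, not part of this module).
import yaml

from modules.whisper.whisper_parameter import WhisperValues

default_params = WhisperValues()  # all defaults, e.g. model_size="large-v2"

# Dump the nested dict produced by to_yaml() into a config file.
with open("default_parameters.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(default_params.to_yaml(), f, allow_unicode=True, sort_keys=False)

# Reading it back yields the same nested structure
# ("whisper", "vad", "diarization", "bgm_separation" sections).
with open("default_parameters.yaml", "r", encoding="utf-8") as f:
    loaded = yaml.safe_load(f)
print(loaded["whisper"]["model_size"])  # "large-v2"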