Update app.py
app.py CHANGED
@@ -1,359 +1,360 @@
import os
import argparse
import gradio as gr
import yaml

from modules.utils.paths import (FASTER_WHISPER_MODELS_DIR, DIARIZATION_MODELS_DIR, OUTPUT_DIR, WHISPER_MODELS_DIR,
                                 INSANELY_FAST_WHISPER_MODELS_DIR, NLLB_MODELS_DIR, DEFAULT_PARAMETERS_CONFIG_PATH,
                                 UVR_MODELS_DIR)
from modules.utils.files_manager import load_yaml
from modules.whisper.whisper_factory import WhisperFactory
from modules.whisper.faster_whisper_inference import FasterWhisperInference
from modules.whisper.insanely_fast_whisper_inference import InsanelyFastWhisperInference
from modules.translation.nllb_inference import NLLBInference
from modules.ui.htmls import *
from modules.utils.cli_manager import str2bool
from modules.utils.youtube_manager import get_ytmetas
from modules.translation.deepl_api import DeepLAPI
from modules.whisper.whisper_parameter import *

### Device info ###
import torch
import torchaudio
import torch.cuda as cuda
import platform
from transformers import __version__ as transformers_version

device = "cuda" if torch.cuda.is_available() else "cpu"
num_gpus = cuda.device_count() if torch.cuda.is_available() else 0
cuda_version = torch.version.cuda if torch.cuda.is_available() else "N/A"
cudnn_version = torch.backends.cudnn.version() if torch.cuda.is_available() else "N/A"
os_info = platform.system() + " " + platform.release() + " " + platform.machine()

# Get the available VRAM for each GPU (if available)
vram_info = []
if torch.cuda.is_available():
    for i in range(cuda.device_count()):
        gpu_properties = cuda.get_device_properties(i)
        vram_info.append(f"**GPU {i}: {gpu_properties.total_memory / 1024**3:.2f} GB**")

pytorch_version = torch.__version__
torchaudio_version = torchaudio.__version__ if 'torchaudio' in dir() else "N/A"

device_info = f"""Running on: **{device}**

Number of GPUs available: **{num_gpus}**

CUDA version: **{cuda_version}**

CuDNN version: **{cudnn_version}**

PyTorch version: **{pytorch_version}**

Torchaudio version: **{torchaudio_version}**

Transformers version: **{transformers_version}**

Operating system: **{os_info}**

Available VRAM:
\t {', '.join(vram_info) if vram_info else '**N/A**'}
"""
### End Device info ###

class App:
    def __init__(self, args):
        self.args = args
        #self.app = gr.Blocks(css=CSS, theme=self.args.theme, delete_cache=(60, 3600))
        self.app = gr.Blocks(css=CSS, theme=gr.themes.Ocean(), delete_cache=(60, 3600))
        self.whisper_inf = WhisperFactory.create_whisper_inference(
            whisper_type=self.args.whisper_type,
            whisper_model_dir=self.args.whisper_model_dir,
            faster_whisper_model_dir=self.args.faster_whisper_model_dir,
            insanely_fast_whisper_model_dir=self.args.insanely_fast_whisper_model_dir,
            uvr_model_dir=self.args.uvr_model_dir,
            output_dir=self.args.output_dir,
        )
        self.nllb_inf = NLLBInference(
            model_dir=self.args.nllb_model_dir,
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.deepl_api = DeepLAPI(
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.default_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
        print(f"Use \"{self.args.whisper_type}\" implementation")
        print(f"Device \"{self.whisper_inf.device}\" is detected")

    def create_whisper_parameters(self):

        whisper_params = self.default_params["whisper"]
        diarization_params = self.default_params["diarization"]
        vad_params = self.default_params["vad"]
        uvr_params = self.default_params["bgm_separation"]

        with gr.Row():
            dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value=whisper_params["model_size"],label="Model")
            dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,value=whisper_params["lang"], label="Language")
            #dd_file_format = gr.Dropdown(choices=["SRT", "WebVTT", "txt"], value="SRT", label="File Format")
            dd_file_format = gr.Dropdown(choices=["SRT", "txt"], value="SRT", label="Output format")

        with gr.Row():
            cb_timestamp = gr.Checkbox(value=whisper_params["add_timestamp"], label="Add timestamp to output file",interactive=True)
            cb_diarize = gr.Checkbox(label="Speaker diarization", value=diarization_params["is_diarize"])
            cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English",interactive=True)

        with gr.Accordion("Diarization options", open=False):
            tb_hf_token = gr.Text(label="HuggingFace Token", value=diarization_params["hf_token"],
                                  info="This is only needed the first time you download the model. If you already have"
                                       " models, you don't need to enter. To download the model, you must manually go "
                                       "to \"https://huggingface.co/pyannote/speaker-diarization-3.1\" and agree to"
                                       " their requirement.")
            dd_diarization_device = gr.Dropdown(label="Device",
                                                choices=self.whisper_inf.diarizer.get_available_device(),
                                                value=self.whisper_inf.diarizer.get_device())

        with gr.Accordion("Advanced options", open=False):
            nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
                                     info="Beam size to use for decoding.")
            nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"], interactive=True,
                                              info="If the average log probability over sampled tokens is below this value, treat as failed.")
            nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"], interactive=True,
                                               info="If the no speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Prob Threshold', consider the segment as silent.")
            dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
                                          value=self.whisper_inf.current_compute_type, interactive=True,
                                          allow_custom_value=True,
                                          info="Select the type of computation to perform.")
            nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
                                   info="Number of candidates when sampling with non-zero temperature.")
            nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
                                    info="Beam search patience factor.")
            cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=whisper_params["condition_on_previous_text"],
                                                        interactive=True,
                                                        info="Condition on previous text during decoding.")
            sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature", value=whisper_params["prompt_reset_on_temperature"],
                                                        minimum=0, maximum=1, step=0.01, interactive=True,
                                                        info="Resets prompt if temperature is above this value."
                                                             " Arg has effect only if 'Condition On Previous Text' is True.")
            tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
                                           info="Initial prompt to use for decoding.")
            sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
                                       step=0.01, maximum=1.0, interactive=True,
                                       info="Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `Compression Ratio Threshold` or `Log Prob Threshold`.")
            nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=whisper_params["compression_ratio_threshold"],
                                                       interactive=True,
                                                       info="If the gzip compression ratio is above this value, treat as failed.")
            nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
                                        precision=0,
                                        info="The length of audio segments. If it is not None, it will overwrite the default chunk_length of the FeatureExtractor.")
            with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
                nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
                                              info="Exponential length penalty constant.")
                nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
                                                  info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
                nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
                                                    precision=0,
                                                    info="Prevent repetitions of n-grams with this size (set 0 to disable).")
                tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
                                       info="Optional text to provide as a prefix for the first window.")
                cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
                                                info="Suppress blank outputs at the beginning of the sampling.")
                tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
                                                info="List of token IDs to suppress. -1 will suppress a default set of symbols as defined in the model config.json file.")
                nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
                                                     info="The initial timestamp cannot be later than this.")
                cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
                                                 info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
                tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
                                                     info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
                tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
                                                    info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
                nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
                                              precision=0,
                                              info="Maximum number of new tokens to generate per-chunk. If not set, the maximum will be set by the default max_length.")
                nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
                                                               value=lambda: whisper_params["hallucination_silence_threshold"],
                                                               info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
                tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
                                         info="Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.")
                nb_language_detection_threshold = gr.Number(label="Language Detection Threshold", value=lambda: whisper_params["language_detection_threshold"],
                                                            info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
                nb_language_detection_segments = gr.Number(label="Language Detection Segments", value=lambda: whisper_params["language_detection_segments"],
                                                           precision=0,
                                                           info="Number of segments to consider for the language detection.")
            with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
                nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)

        with gr.Accordion("Background Music Remover Filter", open=False):
            cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter", value=uvr_params["is_separate_bgm"],
                                            interactive=True,
                                            info="Enabling this will remove background music by submodel before"
                                                 " transcribing ")
            dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
                                        choices=self.whisper_inf.music_separator.available_devices)
            dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
                                            choices=self.whisper_inf.music_separator.available_models)
            nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
            cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"])
            cb_uvr_enable_offload = gr.Checkbox(label="Offload sub model after removing background music",
                                                value=uvr_params["enable_offload"])

        with gr.Accordion("Voice Detection Filter", open=False):
            cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
                                        interactive=True,
                                        info="Enable this to transcribe only detected voice parts by submodel.")
            sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
                                     value=vad_params["threshold"],
                                     info="Lower it to be more sensitive to small sounds.")
            nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
                                                  value=vad_params["min_speech_duration_ms"],
                                                  info="Final speech chunks shorter than this time are thrown out")
            nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
                                                 value=vad_params["max_speech_duration_s"],
                                                 info="Maximum duration of speech chunks in \"seconds\".")
            nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
                                                   value=vad_params["min_silence_duration_ms"],
                                                   info="In the end of each speech chunk wait for this time"
                                                        " before separating it")
            nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=vad_params["speech_pad_ms"],
                                         info="Final speech chunks are padded by this time each side")

        dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])

        return (
            WhisperParameters(
                model_size=dd_model, lang=dd_lang, is_translate=cb_translate, beam_size=nb_beam_size,
                log_prob_threshold=nb_log_prob_threshold, no_speech_threshold=nb_no_speech_threshold,
                compute_type=dd_compute_type, best_of=nb_best_of, patience=nb_patience,
                condition_on_previous_text=cb_condition_on_previous_text, initial_prompt=tb_initial_prompt,
                temperature=sd_temperature, compression_ratio_threshold=nb_compression_ratio_threshold,
                vad_filter=cb_vad_filter, threshold=sd_threshold, min_speech_duration_ms=nb_min_speech_duration_ms,
                max_speech_duration_s=nb_max_speech_duration_s, min_silence_duration_ms=nb_min_silence_duration_ms,
                speech_pad_ms=nb_speech_pad_ms, chunk_length=nb_chunk_length, batch_size=nb_batch_size,
                is_diarize=cb_diarize, hf_token=tb_hf_token, diarization_device=dd_diarization_device,
                length_penalty=nb_length_penalty, repetition_penalty=nb_repetition_penalty,
                no_repeat_ngram_size=nb_no_repeat_ngram_size, prefix=tb_prefix, suppress_blank=cb_suppress_blank,
                suppress_tokens=tb_suppress_tokens, max_initial_timestamp=nb_max_initial_timestamp,
                word_timestamps=cb_word_timestamps, prepend_punctuations=tb_prepend_punctuations,
                append_punctuations=tb_append_punctuations, max_new_tokens=nb_max_new_tokens,
                hallucination_silence_threshold=nb_hallucination_silence_threshold, hotwords=tb_hotwords,
                language_detection_threshold=nb_language_detection_threshold,
                language_detection_segments=nb_language_detection_segments,
                prompt_reset_on_temperature=sld_prompt_reset_on_temperature, is_bgm_separate=cb_bgm_separation,
                uvr_device=dd_uvr_device, uvr_model_size=dd_uvr_model_size, uvr_segment_size=nb_uvr_segment_size,
                uvr_save_file=cb_uvr_save_file, uvr_enable_offload=cb_uvr_enable_offload
            ),
            dd_file_format,
            cb_timestamp
        )

    def launch(self):
        translation_params = self.default_params["translation"]
        deepl_params = translation_params["deepl"]
        nllb_params = translation_params["nllb"]
        uvr_params = self.default_params["bgm_separation"]

        with self.app:
            with gr.Row():
                with gr.Column():
                    gr.Markdown(MARKDOWN, elem_id="md_project")
            with gr.Tabs():
                with gr.TabItem("Audio"):  # tab1
                    with gr.Column():
                        #input_file = gr.Files(type="filepath", label="Upload File here")
                        input_file = gr.Audio(type='filepath', elem_id="audio_input")
                        tb_input_folder = gr.Textbox(label="Input Folder Path (Optional)",
                                                     info="Optional: Specify the folder path where the input files are located, if you prefer to use local files instead of uploading them."
                                                          " Leave this field empty if you do not wish to use a local path.",
                                                     visible=self.args.colab,
                                                     value="")

                    whisper_params, dd_file_format, cb_timestamp = self.create_whisper_parameters()

                    with gr.Row():
                        btn_run = gr.Button("Transcribe", variant="primary")
                        btn_reset = gr.Button(value="Reset")
                        btn_reset.click(None,js="window.location.reload()")
                    with gr.Row():
                        with gr.Column(scale=3):
                            tb_indicator = gr.Textbox(label="Output result")
                        with gr.Column(scale=1):
                            tb_info = gr.Textbox(label="Output info", interactive=False, scale=3)
                            files_subtitles = gr.Files(label="Output file", interactive=False, scale=2)
                            # btn_openfolder = gr.Button('📂', scale=1)

                    params = [input_file, tb_input_folder, dd_file_format, cb_timestamp]
                    btn_run.click(fn=self.whisper_inf.transcribe_file,
                                  inputs=params + whisper_params.as_list(),
                                  outputs=[tb_indicator, files_subtitles, tb_info])
                    # btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)

                with gr.TabItem("Device info"):  # tab2
                    with gr.Column():
                        gr.Markdown(device_info, label="Hardware info & installed packages")

        # Launch the app with optional gradio settings
        args = self.args

        self.app.queue(
            api_open=args.api_open
        ).launch(
            share=args.share,
            server_name=args.server_name,
            server_port=args.server_port,
            auth=(args.username, args.password) if args.username and args.password else None,
            root_path=args.root_path,
            inbrowser=args.inbrowser
        )

    @staticmethod
    def open_folder(folder_path: str):
        if os.path.exists(folder_path):
            os.system(f"start {folder_path}")
        else:
            os.makedirs(folder_path, exist_ok=True)
            print(f"The directory path {folder_path} has newly created.")

    @staticmethod
    def on_change_models(model_size: str):
-        translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
-
-
-
-
-
-
-
-
-parser
-parser
-
-
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-parser.add_argument('--
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-app
+        #translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
+        translatable_model = self.whisper_inf.available_models
+        if model_size not in translatable_model:
+            return gr.Checkbox(visible=False, value=False, interactive=False)
+            #return gr.Checkbox(visible=True, value=False, label="Translate to English (large models only)", interactive=False)
+        else:
+            return gr.Checkbox(visible=True, value=False, label="Translate to English", interactive=True)
+
+
+# Create the parser for command-line arguments
+parser = argparse.ArgumentParser()
+parser.add_argument('--whisper_type', type=str, default="faster-whisper",
+                    help='A type of the whisper implementation between: ["whisper", "faster-whisper", "insanely-fast-whisper"]')
+parser.add_argument('--share', type=str2bool, default=False, nargs='?', const=True, help='Gradio share value')
+parser.add_argument('--server_name', type=str, default=None, help='Gradio server host')
+parser.add_argument('--server_port', type=int, default=None, help='Gradio server port')
+parser.add_argument('--root_path', type=str, default=None, help='Gradio root path')
+parser.add_argument('--username', type=str, default=None, help='Gradio authentication username')
+parser.add_argument('--password', type=str, default=None, help='Gradio authentication password')
+parser.add_argument('--theme', type=str, default=None, help='Gradio Blocks theme')
+parser.add_argument('--colab', type=str2bool, default=False, nargs='?', const=True, help='Is colab user or not')
+parser.add_argument('--api_open', type=str2bool, default=False, nargs='?', const=True, help='Enable api or not in Gradio')
+parser.add_argument('--inbrowser', type=str2bool, default=True, nargs='?', const=True, help='Whether to automatically start Gradio app or not')
+parser.add_argument('--whisper_model_dir', type=str, default=WHISPER_MODELS_DIR,
+                    help='Directory path of the whisper model')
+parser.add_argument('--faster_whisper_model_dir', type=str, default=FASTER_WHISPER_MODELS_DIR,
+                    help='Directory path of the faster-whisper model')
+parser.add_argument('--insanely_fast_whisper_model_dir', type=str,
+                    default=INSANELY_FAST_WHISPER_MODELS_DIR,
+                    help='Directory path of the insanely-fast-whisper model')
+parser.add_argument('--diarization_model_dir', type=str, default=DIARIZATION_MODELS_DIR,
+                    help='Directory path of the diarization model')
+parser.add_argument('--nllb_model_dir', type=str, default=NLLB_MODELS_DIR,
+                    help='Directory path of the Facebook NLLB model')
+parser.add_argument('--uvr_model_dir', type=str, default=UVR_MODELS_DIR,
+                    help='Directory path of the UVR model')
+parser.add_argument('--output_dir', type=str, default=OUTPUT_DIR, help='Directory path of the outputs')
+_args = parser.parse_args()
+
+if __name__ == "__main__":
+    app = App(args=_args)
+    app.launch()
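
One caveat in the added lines: on_change_models keeps its @staticmethod decorator while its new body reads self.whisper_inf.available_models, so self is not defined when Gradio invokes the callback registered via dd_model.change(fn=self.on_change_models, ...), and switching models would raise a NameError at runtime. Below is a minimal sketch of one possible repair, assuming the rest of the class stays as shown above; it is not part of this commit. Dropping the decorator lets the method bind to the instance, since the callback is already looked up through self.

    # Hedged sketch, not from the commit above: make on_change_models a regular
    # instance method so `self` is bound when Gradio calls self.on_change_models.
    def on_change_models(self, model_size: str):
        translatable_model = self.whisper_inf.available_models
        if model_size not in translatable_model:
            # Hide and reset the "Translate to English" checkbox.
            return gr.Checkbox(visible=False, value=False, interactive=False)
        else:
            return gr.Checkbox(visible=True, value=False, label="Translate to English", interactive=True)

Because translatable_model is now taken from self.whisper_inf.available_models rather than the hardcoded large-model list, the checkbox stays visible for every model the selected backend reports, e.g. when the app is launched with python app.py --whisper_type faster-whisper.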