import os
import argparse
import gradio as gr
import yaml

from modules.utils.paths import (FASTER_WHISPER_MODELS_DIR, DIARIZATION_MODELS_DIR, OUTPUT_DIR, WHISPER_MODELS_DIR,
                                 INSANELY_FAST_WHISPER_MODELS_DIR, NLLB_MODELS_DIR, DEFAULT_PARAMETERS_CONFIG_PATH,
                                 UVR_MODELS_DIR)
from modules.utils.files_manager import load_yaml
from modules.whisper.whisper_factory import WhisperFactory
from modules.whisper.faster_whisper_inference import FasterWhisperInference
from modules.whisper.insanely_fast_whisper_inference import InsanelyFastWhisperInference
from modules.translation.nllb_inference import NLLBInference
from modules.ui.htmls import *
from modules.utils.cli_manager import str2bool
from modules.utils.youtube_manager import get_ytmetas
from modules.translation.deepl_api import DeepLAPI
from modules.whisper.whisper_parameter import *

### Device info ###
import torch
import torchaudio
import torch.cuda as cuda
import platform
from transformers import __version__ as transformers_version

device = "cuda" if torch.cuda.is_available() else "cpu"
num_gpus = cuda.device_count() if torch.cuda.is_available() else 0
cuda_version = torch.version.cuda if torch.cuda.is_available() else "N/A"
cudnn_version = torch.backends.cudnn.version() if torch.cuda.is_available() else "N/A"
os_info = platform.system() + " " + platform.release() + " " + platform.machine()

# Get the available VRAM for each GPU (if available)
vram_info = []
if torch.cuda.is_available():
    for i in range(cuda.device_count()):
        gpu_properties = cuda.get_device_properties(i)
        vram_info.append(f"**GPU {i}: {gpu_properties.total_memory / 1024**3:.2f} GB**")

pytorch_version = torch.__version__
torchaudio_version = torchaudio.__version__  # imported unconditionally above, so always available here

device_info = f"""Running on: **{device}**

    Number of GPUs available: **{num_gpus}**

    CUDA version: **{cuda_version}**

    CuDNN version: **{cudnn_version}**

    PyTorch version: **{pytorch_version}**

    Torchaudio version: **{torchaudio_version}**

    Transformers version: **{transformers_version}**

    Operating system: **{os_info}**

    Available VRAM: 
    \t {', '.join(vram_info) if vram_info else '**N/A**'}
"""
### End Device info ###

class App:
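    """Gradio front end for Whisper-based transcription, with optional NLLB/DeepL
    translation, speaker diarization, VAD filtering and background-music separation."""
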
    def __init__(self, args):
        self.args = args
        # self.app = gr.Blocks(css=CSS, theme=self.args.theme, delete_cache=(60, 3600))
        # Note: the hard-coded Ocean theme below overrides the --theme CLI argument.
        self.app = gr.Blocks(css=CSS, theme=gr.themes.Ocean(), title="Automatic speech recognition",
                             delete_cache=(60, 3600))
        self.whisper_inf = WhisperFactory.create_whisper_inference(
            whisper_type=self.args.whisper_type,
            whisper_model_dir=self.args.whisper_model_dir,
            faster_whisper_model_dir=self.args.faster_whisper_model_dir,
            insanely_fast_whisper_model_dir=self.args.insanely_fast_whisper_model_dir,
            uvr_model_dir=self.args.uvr_model_dir,
            output_dir=self.args.output_dir,
        )
        self.nllb_inf = NLLBInference(
            model_dir=self.args.nllb_model_dir,
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.deepl_api = DeepLAPI(
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.default_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
        print(f"Use \"{self.args.whisper_type}\" implementation")
        print(f"Device \"{self.whisper_inf.device}\" is detected")

    def create_whisper_parameters(self):
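        """Build the shared parameter UI (model/language dropdowns, diarization, VAD
        and advanced options) and return (WhisperParameters, dd_file_format,
        cb_timestamp, cb_translate_output, dd_translate_model, dd_target_lang)."""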

        whisper_params = self.default_params["whisper"]
        diarization_params = self.default_params["diarization"]
        vad_params = self.default_params["vad"]
        uvr_params = self.default_params["bgm_separation"]

        # Translation integration
        translation_params = self.default_params["translation"]
        nllb_params = translation_params["nllb"]

        with gr.Row():
            with gr.Column(scale=4):
                with gr.Row():
                    dd_model = gr.Dropdown(choices=self.whisper_inf.available_models,
                                           value=whisper_params["model_size"], label="Model",
                                           info="Larger models increase transcription quality, but reduce performance",
                                           interactive=True)
                    dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,
                                          value=whisper_params["lang"], label="Language",
                                          info="If the language is known upfront, always set it manually",
                                          interactive=True)
                    # dd_file_format = gr.Dropdown(choices=["SRT", "WebVTT", "txt"], value="SRT", label="File Format")
                    dd_file_format = gr.Dropdown(choices=["TXT", "SRT"], value="TXT", label="Output format",
                                                 info="Output preview format", interactive=True, visible=False)
                with gr.Row():
                    dd_translate_model = gr.Dropdown(choices=self.nllb_inf.available_models,
                                                     value=nllb_params["model_size"], label="Model",
                                                     info="Model used for translation", interactive=True)
                    dd_target_lang = gr.Dropdown(choices=["English", "Dutch", "French", "German"],
                                                 value=nllb_params["target_lang"], label="Language",
                                                 info="Language used for output translation", interactive=True)
            with gr.Column(scale=1):
                with gr.Row():
                    cb_timestamp = gr.Checkbox(value=whisper_params["add_timestamp"],
                                               label="Add timestamp to output file", interactive=True)
                with gr.Row():
                    cb_translate = gr.Checkbox(value=whisper_params["is_translate"],
                                               label="Translate transcription to English",
                                               info="Translate using OpenAI Whisper's built-in module",
                                               interactive=True)
                with gr.Row():
                    cb_translate_output = gr.Checkbox(value=translation_params["translate_output"],
                                                      label="Translate output to selected language",
                                                      info="Translate using Facebook's NLLB", interactive=True)
                

        with gr.Accordion("Speaker diarization", open=False, visible=True):
            cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"], label="Use diarization", interactive=True)
            tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"],
                                  info="An access token is required to use diarization & can be created [here](https://hf.co/settings/tokens). If not done yet for your account, you need to accept the terms & conditions of [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) & [segmentation](https://huggingface.co/pyannote/segmentation-3.0)")

        with gr.Accordion("Voice Detection Filter", open=False, visible=True):
            cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
                                        interactive=True,
                                        info="Enable to transcribe only detected voice parts")
            sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
                                     value=vad_params["threshold"],
                                     info="Lower it to be more sensitive to small sounds")
            nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
                                                  value=vad_params["min_speech_duration_ms"],
                                                  info="Final speech chunks shorter than this time are thrown out")
            nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
                                                 value=vad_params["max_speech_duration_s"],
                                                 info="Maximum duration of speech chunks in seconds")
            nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
                                                   value=vad_params["min_silence_duration_ms"],
                                                   info="Wait this long at the end of each speech chunk"
                                                        " before separating it")
            nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=vad_params["speech_pad_ms"],
                                         info="Final speech chunks are padded by this amount on each side")
        
        with gr.Accordion("Advanced options", open=False, visible=False):
            with gr.Accordion("Advanced diarization options", open=False, visible=True):    
                dd_diarization_device = gr.Dropdown(label="Device",
                                                    choices=self.whisper_inf.diarizer.get_available_device(),
                                                    value=self.whisper_inf.diarizer.get_device())
                                                    
            with gr.Accordion("Advanced processing options", open=False):
                nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
                                         info="Beam size to use for decoding.")
                nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"], interactive=True,
                                                  info="If the average log probability over sampled tokens is below this value, treat as failed.")
                nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"], interactive=True,
                                                   info="If the no speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Prob Threshold', consider the segment as silent.")
                dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
                                              value=self.whisper_inf.current_compute_type, interactive=True,
                                              allow_custom_value=True,
                                              info="Select the type of computation to perform.")
                nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
                                       info="Number of candidates when sampling with non-zero temperature.")
                nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
                                        info="Beam search patience factor.")
                cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=whisper_params["condition_on_previous_text"],
                                                            interactive=True,
                                                            info="Condition on previous text during decoding.")
                sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature", value=whisper_params["prompt_reset_on_temperature"],
                                                            minimum=0, maximum=1, step=0.01, interactive=True,
                                                            info="Resets prompt if temperature is above this value."
                                                                 " Arg has effect only if 'Condition On Previous Text' is True.")
                tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
                                               info="Initial prompt to use for decoding.")
                sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
                                           step=0.01, maximum=1.0, interactive=True,
                                           info="Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `Compression Ratio Threshold` or `Log Prob Threshold`.")
                nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=whisper_params["compression_ratio_threshold"],
                                                           interactive=True,
                                                           info="If the gzip compression ratio is above this value, treat as failed.")
                nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
                                            precision=0,
                                            info="The length of audio segments. If it is not None, it will overwrite the default chunk_length of the FeatureExtractor.")
                with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
                    nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
                                                  info="Exponential length penalty constant.")
                    nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
                                                      info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
                    nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
                                                        precision=0,
                                                        info="Prevent repetitions of n-grams with this size (set 0 to disable).")
                    tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
                                           info="Optional text to provide as a prefix for the first window.")
                    cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
                                                    info="Suppress blank outputs at the beginning of the sampling.")
                    tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
                                                    info="List of token IDs to suppress. -1 will suppress a default set of symbols as defined in the model config.json file.")
                    nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
                                                         info="The initial timestamp cannot be later than this.")
                    cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
                                                     info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
                    tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
                                                         info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
                    tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
                                                        info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
                    nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
                                                  precision=0,
                                                  info="Maximum number of new tokens to generate per-chunk. If not set, the maximum will be set by the default max_length.")
                    nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
                                                                   value=lambda: whisper_params["hallucination_silence_threshold"],
                                                                   info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
                    tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
                                             info="Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.")
                    nb_language_detection_threshold = gr.Number(label="Language Detection Threshold", value=lambda: whisper_params["language_detection_threshold"],
                                                                info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
                    nb_language_detection_segments = gr.Number(label="Language Detection Segments", value=lambda: whisper_params["language_detection_segments"],
                                                               precision=0,
                                                               info="Number of segments to consider for the language detection.")
                with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
                    nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)
    
            with gr.Accordion("Background Music Remover Filter", open=False):
                cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter", value=uvr_params["is_separate_bgm"],
                                                interactive=True,
                                                info="Enabling this will remove background music by submodel before transcribing.")
                dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
                                            choices=self.whisper_inf.music_separator.available_devices)
                dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
                                                choices=self.whisper_inf.music_separator.available_models)
                nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
                cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"])
                cb_uvr_enable_offload = gr.Checkbox(label="Offload sub model after removing background music",
                                                    value=uvr_params["enable_offload"])
    

        #dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])

        return (
            WhisperParameters(
                model_size=dd_model, lang=dd_lang, is_translate=cb_translate, beam_size=nb_beam_size,
                log_prob_threshold=nb_log_prob_threshold, no_speech_threshold=nb_no_speech_threshold,
                compute_type=dd_compute_type, best_of=nb_best_of, patience=nb_patience,
                condition_on_previous_text=cb_condition_on_previous_text, initial_prompt=tb_initial_prompt,
                temperature=sd_temperature, compression_ratio_threshold=nb_compression_ratio_threshold,
                vad_filter=cb_vad_filter, threshold=sd_threshold, min_speech_duration_ms=nb_min_speech_duration_ms,
                max_speech_duration_s=nb_max_speech_duration_s, min_silence_duration_ms=nb_min_silence_duration_ms,
                speech_pad_ms=nb_speech_pad_ms, chunk_length=nb_chunk_length, batch_size=nb_batch_size,
                is_diarize=cb_diarize, hf_token=tb_hf_token, diarization_device=dd_diarization_device,
                length_penalty=nb_length_penalty, repetition_penalty=nb_repetition_penalty,
                no_repeat_ngram_size=nb_no_repeat_ngram_size, prefix=tb_prefix, suppress_blank=cb_suppress_blank,
                suppress_tokens=tb_suppress_tokens, max_initial_timestamp=nb_max_initial_timestamp,
                word_timestamps=cb_word_timestamps, prepend_punctuations=tb_prepend_punctuations,
                append_punctuations=tb_append_punctuations, max_new_tokens=nb_max_new_tokens,
                hallucination_silence_threshold=nb_hallucination_silence_threshold, hotwords=tb_hotwords,
                language_detection_threshold=nb_language_detection_threshold,
                language_detection_segments=nb_language_detection_segments,
                prompt_reset_on_temperature=sld_prompt_reset_on_temperature, is_bgm_separate=cb_bgm_separation,
                uvr_device=dd_uvr_device, uvr_model_size=dd_uvr_model_size, uvr_segment_size=nb_uvr_segment_size,
                uvr_save_file=cb_uvr_save_file, uvr_enable_offload=cb_uvr_enable_offload
            ),
            dd_file_format,
            cb_timestamp,
            cb_translate_output,
            dd_translate_model,
            dd_target_lang
        )

    def launch(self):
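        """Assemble the Blocks layout (transcription tab and device-info tab) and
        start the Gradio server with the parsed command-line options."""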
        translation_params = self.default_params["translation"]
        deepl_params = translation_params["deepl"]
        nllb_params = translation_params["nllb"]
        uvr_params = self.default_params["bgm_separation"]

        with self.app:
            with gr.Row():
                with gr.Column():
                    gr.Markdown(MARKDOWN, elem_id="md_project")
            with gr.Tabs():
                with gr.TabItem("Audio upload/record"):  # tab1
                    with gr.Column():
                        #input_file = gr.Files(type="filepath", label="Upload File here")
                        #input_file = gr.File(type="filepath", label="Upload audio/video file here")
                        input_file = gr.Audio(type='filepath', elem_id="audio_input", show_download_button=True)
                        tb_input_folder = gr.Textbox(label="Input Folder Path (Optional)",
                                                     info="Optional: Specify the folder path where the input files are located, if you prefer to use local files instead of uploading them."
                                                          " Leave this field empty if you do not wish to use a local path.",
                                                     visible=self.args.colab,
                                                     value="")

                    whisper_params, dd_file_format, cb_timestamp, cb_translate_output, dd_translate_model, dd_target_lang = self.create_whisper_parameters()

                    with gr.Row():
                        btn_run = gr.Button("Transcribe", variant="primary")
                        btn_reset = gr.Button(value="Reset")
                        btn_reset.click(None, js="window.location.reload()")
                    with gr.Row():
                        with gr.Column(scale=4):
                            tb_indicator = gr.Textbox(label="Output preview (Always review & verify the output generated by AI models)", show_copy_button=True, show_label=True)
                        with gr.Column(scale=1):
                            tb_info = gr.Textbox(label="Output info", interactive=False, show_copy_button=True)
                            files_subtitles = gr.Files(label="Output data", interactive=False, file_count="multiple")
                            # btn_openfolder = gr.Button('📂', scale=1)                        

                    params = [input_file, tb_input_folder, dd_file_format, cb_timestamp, cb_translate_output, dd_translate_model, dd_target_lang]
                    btn_run.click(fn=self.whisper_inf.transcribe_file,
                                  inputs=params + whisper_params.as_list(),
                                  outputs=[tb_indicator, files_subtitles, tb_info])
                    # btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)

                with gr.TabItem("Device info"):  # tab2
                    with gr.Column():
                        gr.Markdown(device_info, label="Hardware info & installed packages")

        # Launch the app with optional gradio settings
        args = self.args

        self.app.queue(
            api_open=args.api_open
        ).launch(
            share=args.share,
            server_name=args.server_name,
            server_port=args.server_port,
            auth=(args.username, args.password) if args.username and args.password else None,
            root_path=args.root_path,
            inbrowser=args.inbrowser
        )

    @staticmethod
    def open_folder(folder_path: str):
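        """Open folder_path in the system file browser, creating it first if needed."""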
        if os.path.exists(folder_path):
            # "start" only exists on Windows; branch per platform
            if platform.system() == "Windows":
                os.startfile(folder_path)
            else:
                opener = "open" if platform.system() == "Darwin" else "xdg-open"
                os.system(f'{opener} "{folder_path}"')
        else:
            os.makedirs(folder_path, exist_ok=True)
            print(f"The directory path {folder_path} has been newly created.")

    @staticmethod
    def on_change_models(model_size: str):
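        """Show the translate-to-English checkbox only for models that support it.
        Currently unused: the dd_model.change(...) hook above is commented out."""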
        translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
        if model_size not in translatable_model:
            return gr.Checkbox(visible=False, value=False, interactive=False)
           #return gr.Checkbox(visible=True, value=False, label="Translate to English (large models only)", interactive=False)
        else:
            return gr.Checkbox(visible=True, value=False, label="Translate to English", interactive=True)

# Create the parser for command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--whisper_type', type=str, default="faster-whisper",
                    help='Whisper implementation to use, one of: ["whisper", "faster-whisper", "insanely-fast-whisper"]')
parser.add_argument('--share', type=str2bool, default=False, nargs='?', const=True, help='Gradio share value')
parser.add_argument('--server_name', type=str, default=None, help='Gradio server host')
parser.add_argument('--server_port', type=int, default=None, help='Gradio server port')
parser.add_argument('--root_path', type=str, default=None, help='Gradio root path')
parser.add_argument('--username', type=str, default=None, help='Gradio authentication username')
parser.add_argument('--password', type=str, default=None, help='Gradio authentication password')
parser.add_argument('--theme', type=str, default=None, help='Gradio Blocks theme')
parser.add_argument('--colab', type=str2bool, default=False, nargs='?', const=True, help='Whether the app is running in Google Colab')
parser.add_argument('--api_open', type=str2bool, default=False, nargs='?', const=True, help='Whether to expose the Gradio API')
parser.add_argument('--inbrowser', type=str2bool, default=True, nargs='?', const=True, help='Whether to automatically open the app in the default browser')
parser.add_argument('--whisper_model_dir', type=str, default=WHISPER_MODELS_DIR,
                    help='Directory path of the whisper model')
parser.add_argument('--faster_whisper_model_dir', type=str, default=FASTER_WHISPER_MODELS_DIR,
                    help='Directory path of the faster-whisper model')
parser.add_argument('--insanely_fast_whisper_model_dir', type=str,
                    default=INSANELY_FAST_WHISPER_MODELS_DIR,
                    help='Directory path of the insanely-fast-whisper model')
parser.add_argument('--diarization_model_dir', type=str, default=DIARIZATION_MODELS_DIR,
                    help='Directory path of the diarization model')
parser.add_argument('--nllb_model_dir', type=str, default=NLLB_MODELS_DIR,
                    help='Directory path of the Facebook NLLB model')
parser.add_argument('--uvr_model_dir', type=str, default=UVR_MODELS_DIR,
                    help='Directory path of the UVR model')
parser.add_argument('--output_dir', type=str, default=OUTPUT_DIR, help='Directory path of the outputs')
_args = parser.parse_args()

if __name__ == "__main__":
    app = App(args=_args)
    app.launch()
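
# Example invocations (illustrative; assumes this file is saved as app.py):
#   python app.py                                                    # defaults: faster-whisper
#   python app.py --whisper_type whisper --server_port 7860
#   python app.py --share true --username admin --password secret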