import os
import argparse

import gradio as gr
import yaml

from modules.utils.paths import (FASTER_WHISPER_MODELS_DIR, DIARIZATION_MODELS_DIR, OUTPUT_DIR, WHISPER_MODELS_DIR,
                                 INSANELY_FAST_WHISPER_MODELS_DIR, NLLB_MODELS_DIR, DEFAULT_PARAMETERS_CONFIG_PATH,
                                 UVR_MODELS_DIR)
from modules.utils.files_manager import load_yaml
from modules.whisper.whisper_factory import WhisperFactory
from modules.whisper.faster_whisper_inference import FasterWhisperInference
from modules.whisper.insanely_fast_whisper_inference import InsanelyFastWhisperInference
from modules.translation.nllb_inference import NLLBInference
from modules.ui.htmls import *
from modules.utils.cli_manager import str2bool
from modules.utils.youtube_manager import get_ytmetas
from modules.translation.deepl_api import DeepLAPI
from modules.whisper.whisper_parameter import *

import platform

import torch
# torchaudio is only used for version reporting below, so treat it as optional.
try:
    import torchaudio
except ImportError:
    torchaudio = None
from transformers import __version__ as transformers_version
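
# Gather hardware / environment details for display in the "Device info" tab.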
device = "cuda" if torch.cuda.is_available() else "cpu"
num_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
cuda_version = torch.version.cuda if torch.cuda.is_available() else "N/A"
cudnn_version = torch.backends.cudnn.version() if torch.cuda.is_available() else "N/A"
os_info = f"{platform.system()} {platform.release()} {platform.machine()}"

vram_info = []
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        gpu_properties = torch.cuda.get_device_properties(i)
        vram_info.append(f"**GPU {i}: {gpu_properties.total_memory / 1024 ** 3:.2f} GB**")

pytorch_version = torch.__version__
torchaudio_version = torchaudio.__version__ if torchaudio is not None else "N/A"

device_info = f"""Running on: **{device}**

Number of GPUs available: **{num_gpus}**

CUDA version: **{cuda_version}**

CuDNN version: **{cudnn_version}**

PyTorch version: **{pytorch_version}**

Torchaudio version: **{torchaudio_version}**

Transformers version: **{transformers_version}**

Operating system: **{os_info}**

Available VRAM: {', '.join(vram_info) if vram_info else '**N/A**'}
"""


class App:
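    """Gradio Blocks app that wires together Whisper transcription, NLLB/DeepL
    translation, VAD, background-music separation, and speaker diarization."""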

    def __init__(self, args):
        self.args = args

        self.app = gr.Blocks(css=CSS, theme=gr.themes.Ocean(), title="Whisper - Automatic speech recognition",
                             delete_cache=(60, 3600))
        self.whisper_inf = WhisperFactory.create_whisper_inference(
            whisper_type=self.args.whisper_type,
            whisper_model_dir=self.args.whisper_model_dir,
            faster_whisper_model_dir=self.args.faster_whisper_model_dir,
            insanely_fast_whisper_model_dir=self.args.insanely_fast_whisper_model_dir,
            uvr_model_dir=self.args.uvr_model_dir,
            output_dir=self.args.output_dir,
        )
        self.nllb_inf = NLLBInference(
            model_dir=self.args.nllb_model_dir,
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.deepl_api = DeepLAPI(
            output_dir=os.path.join(self.args.output_dir, "translations")
        )
        self.default_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
        print(f"Using \"{self.args.whisper_type}\" implementation")
        print(f"Detected device: \"{self.whisper_inf.device}\"")

    def create_whisper_parameters(self):
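        """Build the transcription parameter widgets and return them as a
        WhisperParameters bundle together with the output-format and translation controls."""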
        whisper_params = self.default_params["whisper"]
        diarization_params = self.default_params["diarization"]
        vad_params = self.default_params["vad"]
        uvr_params = self.default_params["bgm_separation"]
        translation_params = self.default_params["translation"]
        nllb_params = translation_params["nllb"]

        with gr.Row():
            with gr.Column(scale=4):
                with gr.Row():
                    dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value=whisper_params["model_size"],
                                           label="Model", info="Larger models increase transcription quality but run slower",
                                           interactive=True)
                    dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,
                                          value=whisper_params["lang"], label="Language",
                                          info="If the language is known upfront, always set it manually", interactive=True)
                    dd_file_format = gr.Dropdown(choices=["TXT", "SRT"], value="TXT", label="Output format",
                                                 info="Output preview format", interactive=True, visible=False)
                with gr.Row():
                    dd_translate_model = gr.Dropdown(choices=self.nllb_inf.available_models, value=nllb_params["model_size"],
                                                     label="Model", info="Model used for translation", interactive=True)
                    dd_target_lang = gr.Dropdown(choices=["English", "Dutch", "French", "German"],
                                                 value=nllb_params["target_lang"], label="Language",
                                                 info="Language used for output translation", interactive=True)
            with gr.Column(scale=1):
                with gr.Row():
                    cb_timestamp = gr.Checkbox(value=whisper_params["add_timestamp"],
                                               label="Add timestamp to output file", interactive=True)
                with gr.Row():
                    cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English",
                                               info="Translate using OpenAI Whisper's built-in module", interactive=True)
                    cb_translate_output = gr.Checkbox(value=translation_params["translate_output"],
                                                      label="Translate to selected language",
                                                      info="Translate using Facebook's NLLB", interactive=True)

        with gr.Accordion("Speaker diarization", open=False, visible=True):
            cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"], label="Use diarization", interactive=True)
            tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"],
                                  info="An access token is required to use diarization; it can be created"
                                       " [here](https://hf.co/settings/tokens). If you have not done so yet for your"
                                       " account, you also need to accept the terms & conditions of"
                                       " [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) &"
                                       " [segmentation](https://huggingface.co/pyannote/segmentation-3.0)")
            dd_diarization_device = gr.Dropdown(label="Device",
                                                choices=self.whisper_inf.diarizer.get_available_device(),
                                                value=self.whisper_inf.diarizer.get_device(),
                                                interactive=True, visible=False)

        with gr.Accordion("Preprocessing options", open=False, visible=True):
            gr.Markdown("<i><b>Note: ⚠ Experimental features (use with caution)</b></i>")

            with gr.Accordion("Voice Detection Filter", open=False, visible=True):
                cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
                                            info="Enable to transcribe only detected voice parts",
                                            interactive=True)
                sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
                                         value=vad_params["threshold"],
                                         info="Lower this value to be more sensitive to quiet sounds")
                nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
                                                      value=vad_params["min_speech_duration_ms"],
                                                      info="Final speech chunks shorter than this are discarded")
                nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
                                                     value=vad_params["max_speech_duration_s"],
                                                     info="Maximum duration of speech chunks in seconds")
                nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
                                                       value=vad_params["min_silence_duration_ms"],
                                                       info="At the end of each speech chunk, wait this long before"
                                                            " splitting it off")
                nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0,
                                             value=vad_params["speech_pad_ms"],
                                             info="Final speech chunks are padded by this amount on each side")

            with gr.Accordion("Background Music Remover Filter", open=False):
                cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter",
                                                value=uvr_params["is_separate_bgm"],
                                                info="Enable to remove background music with a UVR sub-model before transcribing",
                                                interactive=True)
                dd_uvr_device = gr.Dropdown(label="Device",
                                            value=self.whisper_inf.music_separator.device,
                                            choices=self.whisper_inf.music_separator.available_devices,
                                            interactive=True, visible=False)
                dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
                                                choices=self.whisper_inf.music_separator.available_models,
                                                interactive=True)
                nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0,
                                                interactive=True, visible=False)
                cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"],
                                               interactive=True, visible=False)
                cb_uvr_enable_offload = gr.Checkbox(label="Offload sub-model after removing background music",
                                                    value=uvr_params["enable_offload"],
                                                    interactive=True, visible=False)

        with gr.Accordion("Advanced processing options", open=False, visible=False):
            nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
                                     info="Beam size to use for decoding.")
            nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"],
                                              interactive=True,
                                              info="If the average log probability over sampled tokens is below this value, treat as failed.")
            nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"],
                                               interactive=True,
                                               info="If the no-speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Probability Threshold', consider the segment as silent.")
            dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
                                          value=self.whisper_inf.current_compute_type, interactive=True,
                                          allow_custom_value=True,
                                          info="Select the type of computation to perform.")
            nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
                                   info="Number of candidates when sampling with non-zero temperature.")
            nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
                                    info="Beam search patience factor.")
            cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text",
                                                        value=whisper_params["condition_on_previous_text"],
                                                        interactive=True,
                                                        info="Condition on previous text during decoding.")
            sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature",
                                                        value=whisper_params["prompt_reset_on_temperature"],
                                                        minimum=0, maximum=1, step=0.01, interactive=True,
                                                        info="Resets the prompt if the temperature is above this value."
                                                             " Only has an effect if 'Condition On Previous Text' is True.")
            tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
                                           info="Initial prompt to use for decoding.")
            sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
                                       step=0.01, maximum=1.0, interactive=True,
                                       info="Temperature for sampling. It can be a tuple of temperatures, which are used successively upon failures according to either 'Compression Ratio Threshold' or 'Log Probability Threshold'.")
            nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold",
                                                       value=whisper_params["compression_ratio_threshold"],
                                                       interactive=True,
                                                       info="If the gzip compression ratio is above this value, treat as failed.")
            nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
                                        precision=0,
                                        info="The length of audio segments. If it is not None, it overrides the default chunk_length of the FeatureExtractor.")
            with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
                nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
                                              info="Exponential length penalty constant.")
                nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
                                                  info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
                nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
                                                    precision=0,
                                                    info="Prevent repetitions of n-grams with this size (set 0 to disable).")
                tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
                                       info="Optional text to provide as a prefix for the first window.")
                cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
                                                info="Suppress blank outputs at the beginning of the sampling.")
                tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
                                                info="List of token IDs to suppress. -1 suppresses a default set of symbols, as defined in the model's config.json file.")
                nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
                                                     info="The initial timestamp cannot be later than this.")
                cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
                                                 info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
                tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
                                                     info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
                tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
                                                    info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
                nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
                                              precision=0,
                                              info="Maximum number of new tokens to generate per chunk. If not set, the maximum is determined by the default max_length.")
                nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
                                                               value=lambda: whisper_params["hallucination_silence_threshold"],
                                                               info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
                tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
                                         info="Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.")
                nb_language_detection_threshold = gr.Number(label="Language Detection Threshold",
                                                            value=lambda: whisper_params["language_detection_threshold"],
                                                            info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
                nb_language_detection_segments = gr.Number(label="Language Detection Segments",
                                                           value=lambda: whisper_params["language_detection_segments"],
                                                           precision=0,
                                                           info="Number of segments to consider for language detection.")
            with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
                nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)

        return (
            WhisperParameters(
                model_size=dd_model, lang=dd_lang, is_translate=cb_translate, beam_size=nb_beam_size,
                log_prob_threshold=nb_log_prob_threshold, no_speech_threshold=nb_no_speech_threshold,
                compute_type=dd_compute_type, best_of=nb_best_of, patience=nb_patience,
                condition_on_previous_text=cb_condition_on_previous_text, initial_prompt=tb_initial_prompt,
                temperature=sd_temperature, compression_ratio_threshold=nb_compression_ratio_threshold,
                vad_filter=cb_vad_filter, threshold=sd_threshold, min_speech_duration_ms=nb_min_speech_duration_ms,
                max_speech_duration_s=nb_max_speech_duration_s, min_silence_duration_ms=nb_min_silence_duration_ms,
                speech_pad_ms=nb_speech_pad_ms, chunk_length=nb_chunk_length, batch_size=nb_batch_size,
                is_diarize=cb_diarize, hf_token=tb_hf_token, diarization_device=dd_diarization_device,
                length_penalty=nb_length_penalty, repetition_penalty=nb_repetition_penalty,
                no_repeat_ngram_size=nb_no_repeat_ngram_size, prefix=tb_prefix, suppress_blank=cb_suppress_blank,
                suppress_tokens=tb_suppress_tokens, max_initial_timestamp=nb_max_initial_timestamp,
                word_timestamps=cb_word_timestamps, prepend_punctuations=tb_prepend_punctuations,
                append_punctuations=tb_append_punctuations, max_new_tokens=nb_max_new_tokens,
                hallucination_silence_threshold=nb_hallucination_silence_threshold, hotwords=tb_hotwords,
                language_detection_threshold=nb_language_detection_threshold,
                language_detection_segments=nb_language_detection_segments,
                prompt_reset_on_temperature=sld_prompt_reset_on_temperature, is_bgm_separate=cb_bgm_separation,
                uvr_device=dd_uvr_device, uvr_model_size=dd_uvr_model_size, uvr_segment_size=nb_uvr_segment_size,
                uvr_save_file=cb_uvr_save_file, uvr_enable_offload=cb_uvr_enable_offload
            ),
            dd_file_format,
            cb_timestamp,
            cb_translate_output,
            dd_translate_model,
            dd_target_lang
        )

    def launch(self):
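        """Assemble the Gradio UI and start the web server with the parsed CLI arguments."""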
        translation_params = self.default_params["translation"]
        deepl_params = translation_params["deepl"]
        nllb_params = translation_params["nllb"]
        uvr_params = self.default_params["bgm_separation"]

        with self.app:
            with gr.Row():
                with gr.Column():
                    gr.Markdown(MARKDOWN, elem_id="md_project")
            with gr.Tabs():
                with gr.TabItem("Audio upload/record"):
                    with gr.Column():
                        input_file = gr.Audio(type='filepath', elem_id="audio_input", show_download_button=True)
                        tb_input_folder = gr.Textbox(label="Input Folder Path (Optional)",
                                                     info="Optional: Specify the folder path where the input files are"
                                                          " located, if you prefer to use local files instead of"
                                                          " uploading them. Leave this field empty if you do not wish"
                                                          " to use a local path.",
                                                     visible=self.args.colab,
                                                     value="")

                    whisper_params, dd_file_format, cb_timestamp, cb_translate_output, dd_translate_model, dd_target_lang = self.create_whisper_parameters()

                    with gr.Row():
                        btn_run = gr.Button("Transcribe", variant="primary")
                        btn_reset = gr.Button(value="Reset")
                        btn_reset.click(None, js="window.location.reload()")
                    with gr.Row():
                        with gr.Column(scale=4):
                            tb_indicator = gr.Textbox(label="Output preview (Always review output generated by AI models)",
                                                      show_copy_button=True, show_label=True)
                        with gr.Column(scale=1):
                            tb_info = gr.Textbox(label="Output info", interactive=False, show_copy_button=True)
                            files_subtitles = gr.Files(label="Output data", interactive=False, file_count="multiple")

                    params = [input_file, tb_input_folder, dd_file_format, cb_timestamp, cb_translate_output,
                              dd_translate_model, dd_target_lang]
                    btn_run.click(fn=self.whisper_inf.transcribe_file,
                                  inputs=params + whisper_params.as_list(),
                                  outputs=[tb_indicator, files_subtitles, tb_info])

                with gr.TabItem("Device info"):
                    with gr.Column():
                        gr.Markdown(device_info, label="Hardware info & installed packages")

        args = self.args
        self.app.queue(
            api_open=args.api_open
        ).launch(
            share=args.share,
            server_name=args.server_name,
            server_port=args.server_port,
            auth=(args.username, args.password) if args.username and args.password else None,
            root_path=args.root_path,
            inbrowser=args.inbrowser
        )

    @staticmethod
    def open_folder(folder_path: str):
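        """Open the folder in the file explorer, creating it first if it does not exist.
        Uses the Windows-only "start" command."""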
        if os.path.exists(folder_path):
            os.system(f"start {folder_path}")
        else:
            os.makedirs(folder_path, exist_ok=True)
            print(f"The directory {folder_path} has been newly created.")

    @staticmethod
    def on_change_models(model_size: str):
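        """Show the translate-to-English checkbox only for model sizes that support Whisper's translate task."""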
        translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
        if model_size not in translatable_model:
            return gr.Checkbox(visible=False, value=False, interactive=False)
        else:
            return gr.Checkbox(visible=True, value=False, label="Translate to English", interactive=True)


parser = argparse.ArgumentParser()
parser.add_argument('--whisper_type', type=str, default="faster-whisper",
                    help='Whisper implementation to use; one of ["whisper", "faster-whisper", "insanely-fast-whisper"]')
parser.add_argument('--share', type=str2bool, default=False, nargs='?', const=True, help='Gradio share value')
parser.add_argument('--server_name', type=str, default=None, help='Gradio server host')
parser.add_argument('--server_port', type=int, default=None, help='Gradio server port')
parser.add_argument('--root_path', type=str, default=None, help='Gradio root path')
parser.add_argument('--username', type=str, default=None, help='Gradio authentication username')
parser.add_argument('--password', type=str, default=None, help='Gradio authentication password')
parser.add_argument('--theme', type=str, default=None, help='Gradio Blocks theme')
parser.add_argument('--colab', type=str2bool, default=False, nargs='?', const=True, help='Whether the app is running in Google Colab')
parser.add_argument('--api_open', type=str2bool, default=False, nargs='?', const=True, help='Whether to enable the Gradio API')
parser.add_argument('--inbrowser', type=str2bool, default=True, nargs='?', const=True, help='Whether to automatically open the Gradio app in the browser')
parser.add_argument('--whisper_model_dir', type=str, default=WHISPER_MODELS_DIR,
                    help='Directory path of the whisper model')
parser.add_argument('--faster_whisper_model_dir', type=str, default=FASTER_WHISPER_MODELS_DIR,
                    help='Directory path of the faster-whisper model')
parser.add_argument('--insanely_fast_whisper_model_dir', type=str, default=INSANELY_FAST_WHISPER_MODELS_DIR,
                    help='Directory path of the insanely-fast-whisper model')
parser.add_argument('--diarization_model_dir', type=str, default=DIARIZATION_MODELS_DIR,
                    help='Directory path of the diarization model')
parser.add_argument('--nllb_model_dir', type=str, default=NLLB_MODELS_DIR,
                    help='Directory path of the Facebook NLLB model')
parser.add_argument('--uvr_model_dir', type=str, default=UVR_MODELS_DIR,
                    help='Directory path of the UVR model')
parser.add_argument('--output_dir', type=str, default=OUTPUT_DIR, help='Directory path of the outputs')
_args = parser.parse_args()
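
# Example invocation (hypothetical; assumes this file is saved as app.py):
#   python app.py --whisper_type faster-whisper --server_name 0.0.0.0 --server_port 7860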

if __name__ == "__main__":
    app = App(args=_args)
    app.launch()