"""Gradio demo that transcribes Czech audio with a fine-tuned Wav2Vec2-BERT model."""
import gradio as gr
import torch
from transformers import pipeline

MODEL_NAME = "mikr/w2v-bert-2.0-czech-colab-cv16"
lang = "cs"

# Use the first CUDA GPU when available; transformers accepts an int device index or "cpu".
device = 0 if torch.cuda.is_available() else "cpu"

# chunk_length_s=30 enables long-form transcription by splitting audio into 30 s chunks.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)


def transcribe(file_upload):
    """Transcribe an uploaded audio file to Czech text.

    Parameters
    ----------
    file_upload : bytes | None
        Raw audio bytes delivered by the ``gr.File(type="binary")`` component,
        or ``None`` when the user submitted without uploading a file.

    Returns
    -------
    str
        The transcription produced by the ASR pipeline, or an error message
        when no file was provided.
    """
    if file_upload is None:
        # The interface exposes only a file-upload input (no microphone),
        # so the error message must not suggest using a microphone.
        return "ERROR: You have to upload an audio file"
    return pipe(file_upload)["text"]


iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.File(type="binary", label="Upload Audio File"),  # Audio file upload
    ],
    outputs="text",
    theme="huggingface",
    title="Wav2Vec2-Bert demo - transcribe Czech Audio",
    description=(
        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the fine-tuned"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) from Whisper Fine Tuning Sprint Event 2022 "
        "and 🤗 Transformers to transcribe audio files of arbitrary length."
    ),
    allow_flagging="never",
)

iface.launch()