"""Gradio speech-to-text demo for German.

Two tabs: transcribe an uploaded audio file, or transcribe a microphone
recording. Both feed a wav2vec2 model fine-tuned for German speech.
"""

import os

import gradio as gr
from transformers import pipeline

# Bind the loaded pipeline to its own name. The original wrote
# `pipeline = pipeline(...)`, shadowing the transformers factory and
# breaking any later pipeline(...) call (e.g. the whisper alternative below).
asr = pipeline(
    task="automatic-speech-recognition",
    model="jonatasgrosman/wav2vec2-large-xlsr-53-german",
)
# Alternative model:
# asr = pipeline(task="automatic-speech-recognition", model="openai/whisper-large")


def transcribeFile(audio_path: str) -> str:
    """Transcribe the audio file at *audio_path* and return the text.

    Args:
        audio_path: Filesystem path to an audio file, as produced by a
            gr.Audio component with type="filepath".

    Returns:
        The recognized text.
    """
    transcription = asr(audio_path)
    return transcription["text"]


def transcribeMic(audio) -> str:
    """Transcribe raw microphone audio given as a (sample_rate, data) tuple.

    NOTE(review): unused by the interfaces below (app2 uses type="filepath"
    and reuses transcribeFile); kept for callers that pass numpy-style input.
    """
    sr, data = audio
    transcription = asr(data)
    return transcription["text"]


app1 = gr.Interface(
    fn=transcribeFile,
    # gr.inputs.Audio is the deprecated pre-Blocks namespace; gr.Audio is the
    # current component and accepts the same label/type keyword arguments.
    inputs=gr.Audio(label="Upload audio file", type="filepath"),
    outputs="text",
)

app2 = gr.Interface(
    fn=transcribeFile,  # type="filepath" delivers a temp-file path, so reuse
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="text",
)

title = "Speech to text for German"
# Positional arguments must precede keyword arguments: the original
# gr.TabbedInterface(title=title, [app1, app2], ...) was a SyntaxError.
demo = gr.TabbedInterface([app1, app2], ["Audio File", "Microphone"], title=title)

if __name__ == "__main__":
    demo.launch()