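# Gradio app: upload an audio file, choose a Whisper model size, and get back the transcription.
# Note: `whisper` below is assumed to be the openai-whisper package (pip install openai-whisper).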
import gradio as gr
import whisper

# Function to process the audio file
def transcribe_audio(model_size, audio):
    # Load the Whisper model based on the user's choice
    model = whisper.load_model(model_size)
    # Transcribe the audio file
    result = model.transcribe(audio)
    return result['text']

# Gradio interface with model selection
iface = gr.Interface(
    fn=transcribe_audio,  # The function that will process the audio and model choice
    inputs=[
        gr.Dropdown(label="Choose Whisper Model", choices=["tiny", "base", "small", "medium", "large"], value="base"),  # Model selection
        gr.Audio(type="filepath")  # Audio upload input
    ],
    outputs="text",  # Output transcription as text
    title="Whisper Audio Transcription",
    description="Upload an audio file and select a Whisper model to get the transcription."
)

# Launch the interface
iface.launch()