import gradio as gr
import whisper
# Load Whisper model
model = whisper.load_model("base")
def transcribe(audio_path):
    """Transcribes the given audio file using OpenAI's Whisper model."""
    if audio_path is None:
        return "No audio file provided."
    # Load and transcribe the audio
    result = model.transcribe(audio_path)
    return result["text"]
# Create Gradio interface
app = gr.Interface(fn=transcribe, inputs=gr.Audio(type="filepath"), outputs="text")
app.launch()
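# Note (not part of the original Space): since transcribe() takes a plain file
# path, it can also be sanity-checked directly, without the Gradio UI, e.g.:
#   print(transcribe("sample.wav"))  # "sample.wav" is a hypothetical local file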