import whisper
import gradio as gr

# Load the Whisper "small" checkpoint hosted on the Hugging Face Hub
model = whisper.load_hf_model(repo_id="jerpint/whisper", filename="small.pt")

|
def transcribe(audio, translate):
    # Translate to English when the checkbox is ticked,
    # otherwise fall back to Whisper's default task (transcription)
    task = "translate" if translate else None
    result = model.transcribe(audio, task=task)
    return result["text"]

|
gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        gr.Checkbox(label="Translate to English"),
    ],
    examples=[
        ["samples/french_hello.wav", True],
        ["samples/english_hello.wav", True],
        ["samples/hebrew_hello.wav", True],
        ["samples/spanish_hello.wav", True],
    ],
    outputs="text",
).launch()
|
|