Spaces:
Runtime error
Runtime error
File size: 1,683 Bytes
0899e82 e0c2b71 7dd7859 648fa39 7dd7859 dbe0d2a b67345a dd9eb54 dbe0d2a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
import gradio as gr
import torch
import whisper
### ----------------------------------------
# App title (not referenced in the visible code — presumably used by the
# hosting page metadata; verify before removing).
title="Whisper to Emotion"
### ----------------------------------------
# Load the multilingual "small" Whisper checkpoint once at import time;
# load_model picks its own device (GPU if available).
whisper_model = whisper.load_model("small")
# NOTE(review): `device` is computed but never read in this file — Whisper
# already resolves its device internally. Confirm it is unused, then drop it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def translate(audio):
    """Transcribe (and translate to English) a recorded audio clip with Whisper.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded clip (Gradio ``type="filepath"``).

    Returns
    -------
    str
        The transcription in the originally spoken language.
    """
    print("Sending audio to Whisper ...")
    # Load the waveform and pad/trim it to the fixed 30-second window
    # Whisper's encoder expects.
    waveform = whisper.load_audio(audio)
    waveform = whisper.pad_or_trim(waveform)
    # Move the log-Mel spectrogram to the same device as the model.
    mel = whisper.log_mel_spectrogram(waveform).to(whisper_model.device)
    # NOTE: the original code also called whisper_model.detect_language(mel)
    # here, but its result was never used — decode() already reports the
    # detected language — so the redundant forward pass is removed.
    # fp16=False keeps decoding working on CPU-only hosts.
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)
    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)
    return transcription.text
# Build the UI. The original created the Audio/Textbox components inside
# gr.Blocks but never connected them to `translate`; instead it instantiated
# a gr.Interface *inside* the Blocks context, duplicating the components —
# the likely cause of the Space's runtime error. Wire the components to the
# callback explicitly with a Button instead.
with gr.Blocks() as demo:
    gr.Markdown("""
    # Emotion Detection From Speech Using Whisper
    """)
    audio_input = gr.Audio(label='Record Audio Input', source="microphone", type="filepath")
    transcript_output = gr.Textbox(label="Transcription in the language you spoke")
    transcribe_button = gr.Button("Transcribe")
    transcribe_button.click(fn=translate, inputs=audio_input, outputs=transcript_output)
demo.launch()