import gradio as gr
import torch
import whisper
from transformers import pipeline
### ────────────────────────────────────────
title = "Whisper to Emotion"
### ────────────────────────────────────────
# Whisper checkpoint used for transcription and English translation
whisper_model = whisper.load_model("small")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Fine-tuned DistilBERT that classifies English text into six emotions
# (sadness, joy, love, anger, fear, surprise)
emotion_classifier = pipeline("text-classification", model='bhadresh-savani/distilbert-base-uncased-emotion')
def translate(audio):
    print("Sending audio to Whisper ...")
    # Load the recording and fit it to Whisper's 30-second input window
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    # Compute the log-Mel spectrogram and detect the spoken language
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    _, probs = whisper_model.detect_language(mel)
    # Decode twice: once in the original language, once translated to English
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)
    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)
    return transcription.text
with gr.Blocks() as demo:
    gr.Markdown("""
    # Emotion Detection From Speech Using Whisper
    """)
    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(label='Record Audio Input', source="microphone", type="filepath")
            with gr.Row():
                transcribe_audio = gr.Button('Transcribe')
        with gr.Column():
            transcript_output = gr.Textbox(label="Transcription in the language you spoke")
    transcribe_audio.click(translate, inputs=audio_input, outputs=transcript_output)

demo.launch()
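
Note that `emotion_classifier` is loaded above but never called in the portion of app.py shown here (the file viewer reports 61 lines, so the emotion step likely lives in the part not displayed). Below is a minimal sketch of how the classifier could be wired into the same Blocks UI; the `detect_emotion` helper and the extra button/textbox are illustrative assumptions, not the Space's actual code:

def detect_emotion(audio):
    # Hypothetical helper (not in the original file): reuse the Whisper
    # pipeline, then classify the English translation, since the
    # distilbert-base-uncased-emotion checkpoint expects English text.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    translation = whisper.decode(
        whisper_model, mel, whisper.DecodingOptions(task="translate", fp16=False)
    )
    # The text-classification pipeline returns a list of dicts,
    # e.g. [{'label': 'joy', 'score': 0.98}]
    result = emotion_classifier(translation.text)[0]
    return f"{result['label']} ({result['score']:.2f})"

# Inside the Blocks context, an extra button and textbox could expose it:
#     emotion_output = gr.Textbox(label="Detected Emotion")
#     detect_btn = gr.Button('Detect Emotion')
#     detect_btn.click(detect_emotion, inputs=audio_input, outputs=emotion_output)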