import gradio as gr
import torch
import whisper
from transformers import pipeline

### ————————————————————————————————————————
title = "Whisper to Emotion"
### ————————————————————————————————————————

# Load the Whisper speech recognition model and the text emotion classifier
whisper_model = whisper.load_model("small")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

emotion_classifier = pipeline("text-classification", model='bhadresh-savani/distilbert-base-uncased-emotion')


def translate_and_classify(audio):
    print("""
    — Sending audio to Whisper ... —
    """)
    # Load the recorded audio and pad/trim it to Whisper's 30-second window
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram on the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Detect the spoken language
    _, probs = whisper_model.detect_language(mel)

    # Decode twice: a transcript in the original language and an English translation
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # Classify the emotion of the English translation
    emotion = emotion_classifier(translation.text)
    detected_emotion = emotion[0]["label"]
    return transcription.text, detected_emotion


with gr.Blocks() as demo:
    gr.Markdown("""
    ## Emotion Detection From Speech with Whisper
    """)
    with gr.Row():
        with gr.Column():
            #gr.Markdown(""" ### Record audio """)
            audio_input = gr.Audio(label='Record Audio Input', source="microphone", type="filepath")
            with gr.Row():
                transcribe_audio = gr.Button('Transcribe')
        with gr.Column():
            with gr.Row():
                transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines=3)
                emotion_output = gr.Textbox(label="Detected Emotion")

    transcribe_audio.click(translate_and_classify, inputs=audio_input, outputs=[transcript_output, emotion_output])

    gr.HTML('''
    ''')

demo.launch()