Commit f9e272f · Update app.py
Parent(s): ce8819c

app.py CHANGED
@@ -13,7 +13,7 @@ whisper_model = whisper.load_model("small")
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-emotion_classifier = pipeline("text-classification",model='bhadresh-savani/distilbert-base-uncased-emotion')
+emotion_classifier = pipeline("text-classification",model='bhadresh-savani/distilbert-base-uncased-emotion', return_all_scores = True)
 
 def translate_and_classify(audio):
 
@@ -40,8 +40,8 @@ def translate_and_classify(audio):
     print("Translated: " + translation.text)
 
     emotion = emotion_classifier(translation.text)
-    detected_emotion = emotion[0]["label"]
-    return transcription.text,
+    #detected_emotion = emotion[0]["label"]
+    return transcription.text, emotion
 
 css = """
 .gradio-container {
@@ -141,7 +141,7 @@ with gr.Blocks(css = css) as demo:
 
     with gr.Row():
        transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines = 3)
-       emotion_output = gr.
+       emotion_output = gr.Label(label = "Detected Emotion")
 
    transcribe_audio.click(translate_and_classify, inputs = audio_input, outputs = [transcript_output,emotion_output])
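
Note on the change (not part of the commit): with return_all_scores = True, the transformers text-classification pipeline returns, for each input string, a list of {"label", "score"} dicts covering every emotion class, and gr.Label can render a dict that maps labels to confidences. Below is a minimal sketch of reshaping the classifier output into that dict, assuming that mapping is the intent behind switching the output component to gr.Label; the helper name emotion_scores_for_label is hypothetical and not in the commit.

from transformers import pipeline

# Same pipeline call as in the commit; return_all_scores=True yields scores
# for every emotion class instead of only the top prediction.
emotion_classifier = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    return_all_scores=True,
)

def emotion_scores_for_label(text):
    # Hypothetical helper, not in the commit: the pipeline returns one list of
    # {"label", "score"} dicts per input string; gr.Label accepts a dict
    # mapping each label to its confidence.
    scores = emotion_classifier(text)[0]
    return {item["label"]: item["score"] for item in scores}

In translate_and_classify this would correspond to returning transcription.text together with the dict built from emotion[0], rather than the raw nested list the pipeline produces.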