Spaces: Running on Zero
import gradio as gr
import spaces
import torch
import librosa
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
# Use the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Load the feature extractor and emotion-classification model once at startup.
model_name = "Hemg/human-emotion-detection"
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name).to(device)
def preprocess_audio(audio):
    # Load the audio file and resample it to the 16 kHz rate the model expects.
    audio_array, sampling_rate = librosa.load(audio, sr=16000)
    return {'speech': audio_array, 'sampling_rate': sampling_rate}
@spaces.GPU
def inference(audio):
    example = preprocess_audio(audio)
    inputs = feature_extractor(example['speech'], sampling_rate=16000, return_tensors="pt", padding=True)
    inputs = inputs.to(device)  # Move inputs to the same device as the model
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    # Move results back to the CPU and convert to plain Python types for the Gradio JSON outputs.
    return model.config.id2label[predicted_ids.item()], logits.cpu().tolist(), predicted_ids.item()

iface = gr.Interface(fn=inference,
                     inputs=gr.Audio(type="filepath"),
                     outputs=[gr.Label(label="Predicted Sentiment"),
                              gr.JSON(label="Logits"),
                              gr.JSON(label="Predicted ID")],
                     title="Audio Sentiment Analysis",
                     description="Upload an audio file or record one to analyze sentiment.")
iface.launch(share=True)