import gradio as gr
import torch

from model import ECAPA_gender
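
# Load the pretrained gender-classification checkpoint from the Hugging Face Hub
# and switch the model to eval mode for inference.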
model = ECAPA_gender.from_pretrained('JaesungHuh/ecapa-gender')
model.eval()
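

# Classify one audio file and return a {label: probability} dict,
# the format gr.Label expects.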
def predict_gender(filepath):
    # Decode the uploaded file into the tensor format the model expects.
    audio = model.load_audio(filepath)
    # Run a single forward pass with gradient tracking disabled.
    with torch.no_grad():
        output = model.forward(audio)
        probs = torch.softmax(output, dim=1)
    # Attach a readable label to each class probability.
    prob_dict = {'Human ' + model.pred2gender[i]: float(prob) for i, prob in enumerate(probs[0])}
    return prob_dict
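
# The function can also be exercised directly, without the UI (assuming the
# bundled example file sits next to this script):
#
#     print(predict_gender('00001.wav'))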
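

# Build the Gradio UI: an audio-upload input wired to a label output that
# displays the returned class probabilities.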
audio_component = gr.Audio(type='filepath', label='Upload your audio file here')
label_component = gr.Label(label='Gender classification result')
demo = gr.Interface(
    fn=predict_gender,
    inputs=audio_component,
    outputs=label_component,
    examples=['00001.wav', '00002.wav'],
)
demo.launch()
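
# Note: demo.launch(share=True) would additionally expose the app via a
# temporary public link.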