#!/usr/bin/env python
# coding: utf-8
# # Create embeddings with the transformers library
#
# We use the Hugging Face transformers library to create embeddings for an audio dataset.
#
# ## tl;dr: Use as callable functions
import datasets
from transformers import AutoFeatureExtractor, AutoModel, ASTForAudioClassification
import torch
from renumics import spotlight
import pandas as pd
import umap
import numpy as np
_SPLIT = "train"


def __set_device():
    """Pick the GPU if available and clear its cache before use."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.cuda.empty_cache()
    return device
def extract_embeddings(model, feature_extractor):
    """Utility to compute embeddings."""
    device = model.device

    def pp(batch):
        audios = [element["array"] for element in batch["audio"]]
        inputs = feature_extractor(
            raw_speech=audios, sampling_rate=16000, return_tensors="pt", padding=True
        ).to(device)
        # use the first (CLS) token of the last hidden state as the embedding
        with torch.no_grad():
            embeddings = model(**inputs).last_hidden_state[:, 0].cpu()
        return {"embedding": embeddings}

    return pp
def huggingface_embedding(dataset, modelname, batched=True, batch_size=8):
    # initialize the Hugging Face model and feature extractor
    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
    model = AutoModel.from_pretrained(modelname, output_hidden_states=True)
    # compute embeddings
    device = __set_device()
    extract_fn = extract_embeddings(model.to(device), feature_extractor)
    updated_dataset = dataset.map(extract_fn, batched=batched, batch_size=batch_size)
    return updated_dataset
def batch_probabilities(model, feature_extractor):
    device = model.device

    def processing(batch):
        audios = [element["array"] for element in batch["audio"]]
        inputs = feature_extractor(
            raw_speech=audios, sampling_rate=16000, return_tensors="pt", padding=True
        ).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        return {"logits": outputs.logits.cpu()}

    return processing
def annotate_probabilities(dataset, modelname, batched=True, batch_size=8):
    model = ASTForAudioClassification.from_pretrained(modelname)
    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
    device = __set_device()
    calc_outputs = batch_probabilities(model.to(device), feature_extractor)
    output_dataset = dataset.map(calc_outputs, batched=batched, batch_size=batch_size)
    return output_dataset
def annotate_batch(model, dataset):
    # take the ground-truth label names from the dataset itself
    labels = dataset.features["label"].names

    def batch_annotation(batch):
        logits = [torch.tensor(element) for element in batch["logits"]]
        probabilities_per_class = [torch.nn.functional.softmax(logit, dim=-1) for logit in logits]
        predicted_class_ids = [torch.argmax(logit).item() for logit in logits]
        predicted_labels = [model.config.id2label[predicted_class_id] for predicted_class_id in predicted_class_ids]
        # the pre-trained model may have been fine-tuned on a different set of classes:
        # its id2label only reflects the model's internal labels, not the dataset labels,
        # so annotate the ground truth from the dataset's own label names instead
        annotated_labels = [labels[element] for element in batch["label"]]
        probabilities = []
        for index, prob_per_class in enumerate(probabilities_per_class):
            probabilities.append(prob_per_class[predicted_class_ids[index]].item())
        return {
            "Probability": probabilities,
            "Predicted Label": predicted_labels,
            "Annotated Labels": annotated_labels,
            "Probability Vector": probabilities_per_class,
        }

    return batch_annotation
def annotate_dataset(dataset, modelname, batched=True, batch_size=8):
    model = ASTForAudioClassification.from_pretrained(modelname)
    device = __set_device()
    annotate = annotate_batch(model.to(device), dataset)
    annotated_dataset = dataset.map(annotate, batched=batched, batch_size=batch_size)
    return annotated_dataset
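# As promised by the tl;dr above, the helpers compose into a single pipeline.
# A sketch (mirrors the step-by-step walkthrough below, so it is left commented out):
#
#   ds = datasets.load_dataset("speech_commands", "v0.01", split=_SPLIT)
#   ds = annotate_probabilities(ds, "MIT/ast-finetuned-speech-commands-v2")
#   ds = annotate_dataset(ds, "MIT/ast-finetuned-speech-commands-v2")
#   ds = huggingface_embedding(ds, "MIT/ast-finetuned-speech-commands-v2")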
# ## Step-by-step example on speech-commands
#
# ### Load speech-commands from the Hugging Face hub
# _SPLIT selects the split to process; switch it to "validation" to evaluate
# the model's performance on unseen data
dataset = datasets.load_dataset("speech_commands", "v0.01", split=_SPLIT)
labels = dataset.features["label"].names
# Let's have a look at all of the labels that we want to predict
print(labels)
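# The AST feature extractor expects 16 kHz audio. A quick check (a sketch,
# assuming the standard datasets.Audio feature) that the dataset matches:
print(dataset.features["audio"].sampling_rate)  # speech_commands ships at 16000 Hz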
# ### Compute probabilities and annotate dataset
# First, calculate the logits for each sample and attach them to the dataset
dataset_annotated = annotate_probabilities(dataset, "MIT/ast-finetuned-speech-commands-v2")
# Now annotate predicted labels and probabilities
dataset_annotated_complete = annotate_dataset(dataset_annotated, "MIT/ast-finetuned-speech-commands-v2")
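# A quick sanity check (a sketch using the columns added above): how often does
# the model's prediction agree with the annotated ground-truth label?
matches = sum(
    predicted == annotated
    for predicted, annotated in zip(
        dataset_annotated_complete["Predicted Label"],
        dataset_annotated_complete["Annotated Labels"],
    )
)
print(f"{matches / len(dataset_annotated_complete):.1%} of predictions match the annotation")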
# ### Compute embeddings with the audio spectrogram transformer from Hugging Face
dataset_enriched = huggingface_embedding(dataset_annotated_complete, "MIT/ast-finetuned-speech-commands-v2")
# ### Reduce embeddings for faster visualization
embeddings = np.stack(dataset_enriched["embedding"])
reducer = umap.UMAP()
reduced_embedding = reducer.fit_transform(embeddings)
dataset_enriched = dataset_enriched.add_column("embedding_reduced", list(reduced_embedding))
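# Shape check (a sketch): UMAP maps each high-dimensional embedding to a 2-d point
print(embeddings.shape, "->", reduced_embedding.shape)  # e.g. (N, 768) -> (N, 2)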
print(dataset_enriched.features)
df = dataset_enriched.to_pandas()
df.to_parquet(f"data/dataset_audio_{_SPLIT}.parquet.gzip", compression="gzip")
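# The enriched dataframe can now be explored interactively, e.g. with Renumics
# Spotlight (a sketch; the dtype mapping for the audio and embedding columns is
# an assumption about the column layout):
spotlight.show(df, dtype={"audio": spotlight.Audio, "embedding_reduced": spotlight.Embedding})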