#!/usr/bin/env python
# coding: utf-8
# # Create embeddings with the transformers library
#
# We use the Huggingface transformers library to create embeddings for an audio dataset.
#
# ## tl;dr: Play as callable functions
import os

import datasets
import numpy as np
import pandas as pd
import torch
import umap
from renumics import spotlight
from transformers import ASTForAudioClassification, AutoFeatureExtractor, AutoModel

_SPLIT = "train"
def __set_device():
    """Return "cuda" if a GPU is available, clearing its cache first."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.cuda.empty_cache()
    return device
def extract_embeddings(model, feature_extractor):
    """Utility to compute embeddings."""
    device = model.device

    def pp(batch):
        audios = [element["array"] for element in batch["audio"]]
        # speech_commands is sampled at 16 kHz; pass the rate explicitly,
        # consistent with batch_probabilities below
        inputs = feature_extractor(
            raw_speech=audios, sampling_rate=16000, return_tensors="pt", padding=True
        ).to(device)
        # use the hidden state of the first (CLS) token as the embedding;
        # no_grad avoids building an autograd graph during inference
        with torch.no_grad():
            embeddings = model(**inputs).last_hidden_state[:, 0].cpu()
        return {"embedding": embeddings}

    return pp
def huggingface_embedding(dataset, modelname, batched=True, batch_size=8):
    # initialize the Huggingface model and feature extractor
    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
    model = AutoModel.from_pretrained(modelname, output_hidden_states=True)
    # compute embeddings
    device = __set_device()
    extract_fn = extract_embeddings(model.to(device), feature_extractor)
    updated_dataset = dataset.map(extract_fn, batched=batched, batch_size=batch_size)
    return updated_dataset
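# Note: the AST checkpoint used in this example expects 16 kHz mono audio.
# speech_commands is already 16 kHz, but other datasets can be resampled on
# the fly with the datasets Audio feature. A minimal sketch (`your_dataset`
# is a placeholder; uncomment to use):
# your_dataset = your_dataset.cast_column("audio", datasets.Audio(sampling_rate=16000))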
def batch_probabilities(model, feature_extractor):
    """Utility to compute classification logits per batch."""
    device = model.device

    def processing(batch):
        audios = [element["array"] for element in batch["audio"]]
        inputs = feature_extractor(
            raw_speech=audios, return_tensors="pt", padding=True, sampling_rate=16000
        ).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        # move the logits to CPU so datasets can serialize them on GPU runs
        return {"logits": outputs.logits.cpu()}

    return processing
def annotate_probabilities(dataset, modelname, batched=True, batch_size=8):
    model = ASTForAudioClassification.from_pretrained(modelname)
    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
    device = __set_device()
    calc_outputs = batch_probabilities(model.to(device), feature_extractor)
    output_dataset = dataset.map(calc_outputs, batched=batched, batch_size=batch_size)
    return output_dataset
def annotate_batch(model, dataset):
    def batch_annotation(batch):
        logits = [torch.tensor(element) for element in batch["logits"]]
        probabilities_per_class = [torch.nn.functional.softmax(logit, dim=-1) for logit in logits]
        predicted_class_ids = [torch.argmax(logit).item() for logit in logits]
        predicted_labels = [model.config.id2label[class_id] for class_id in predicted_class_ids]
        # The pre-trained model may have been trained on a different set of
        # classes, so id2label only reflects the model's internal labels, not
        # the actual dataset labels; `labels` is the global list defined below.
        annotated_labels = [labels[element] for element in batch["label"]]
        probabilities = []
        for index, prob_per_class in enumerate(probabilities_per_class):
            probabilities.append(prob_per_class[predicted_class_ids[index]].item())
        return {
            "Probability": probabilities,
            "Predicted Label": predicted_labels,
            "Annotated Labels": annotated_labels,
            "Probability Vector": probabilities_per_class,
        }

    return batch_annotation
def annotate_dataset(dataset, modelname, batched=True, batch_size=8):
    model = ASTForAudioClassification.from_pretrained(modelname)
    device = __set_device()
    annotate = annotate_batch(model.to(device), dataset)
    annotated_dataset = dataset.map(annotate, batched=batched, batch_size=batch_size)
    return annotated_dataset
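# Optional smoke test: before processing a full split, the pipeline can be run
# on a tiny slice to catch shape or device issues cheaply. A minimal sketch
# (the slice size of 8 is an arbitrary choice; uncomment to use):
# _tiny = datasets.load_dataset("speech_commands", "v0.01", split="train").select(range(8))
# _tiny = huggingface_embedding(_tiny, "MIT/ast-finetuned-speech-commands-v2")
# print(len(_tiny["embedding"][0]))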
# ## Step-by-step example on speech-commands
#
# ### Load speech-commands from the Huggingface hub
# We load the split configured in `_SPLIT` (here: "train"); set it to
# "validation" to evaluate the model's performance on unseen data.
dataset = datasets.load_dataset("speech_commands", "v0.01", split=_SPLIT)
labels = dataset.features["label"].names
# Let's have a look at all of the labels that we want to predict
print(labels)
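# Quick sanity check on a single sample: the datasets Audio feature decodes
# each entry to a dict with "array", "sampling_rate" and "path" keys.
sample = dataset[0]
print(sample["audio"]["sampling_rate"], len(sample["audio"]["array"]), labels[sample["label"]])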
# ### Compute probabilities and annotate dataset
# First, calculate the logits for each sample
dataset_annotated = annotate_probabilities(dataset, "MIT/ast-finetuned-speech-commands-v2")
# Now annotate labels and probabilities
dataset_annotated_complete = annotate_dataset(dataset_annotated, "MIT/ast-finetuned-speech-commands-v2")
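# Peek at the columns created above for the first sample
print(
    dataset_annotated_complete[0]["Predicted Label"],
    dataset_annotated_complete[0]["Annotated Labels"],
    round(dataset_annotated_complete[0]["Probability"], 3),
)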
# ### Compute embeddings with the audio spectrogram transformer (AST) from Huggingface
dataset_enriched = huggingface_embedding(dataset_annotated_complete, "MIT/ast-finetuned-speech-commands-v2")
# ### Reduce embeddings for faster visualization
embeddings = np.array(dataset_enriched["embedding"])
reducer = umap.UMAP()
reduced_embedding = reducer.fit_transform(embeddings)
dataset_enriched = dataset_enriched.add_column("embedding_reduced", list(reduced_embedding))
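# Note: UMAP is stochastic, so the 2D layout differs between runs; for a
# reproducible layout, umap-learn accepts a seed, e.g. umap.UMAP(random_state=42)
# (the seed value here is illustrative).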
print(dataset_enriched.features)
df = dataset_enriched.to_pandas()
os.makedirs("data", exist_ok=True)  # make sure the output folder exists
df.to_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip", compression="gzip")
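# The spotlight import above can now be used to explore the enriched dataset
# interactively. A minimal sketch, assuming Spotlight's documented dtype
# mapping for embedding columns (uncomment to use):
# spotlight.show(
#     df,
#     dtype={
#         "embedding": spotlight.Embedding,
#         "embedding_reduced": spotlight.Embedding,
#     },
# )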