"""CREMA-D (Crowd-sourced Emotional Multimodal Actors Dataset)."""

import os

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{cao2014crema,
  title={CREMA-D: Crowd-sourced Emotional Multimodal Actors Dataset},
  author={Cao, H. and Cooper, D. G. and Keutmann, M. K. and Gur, R. C. and Nenkova, A. and Verma, R.},
  journal={IEEE Transactions on Affective Computing},
  volume={5},
  number={4},
  pages={377--390},
  year={2014},
  doi={10.1109/TAFFC.2014.2336244},
  url={https://doi.org/10.1109/TAFFC.2014.2336244}
}
"""

_DESCRIPTION = """\
CREMA-D is a dataset of 7,442 original clips from 91 actors.
These clips were recorded by 48 male and 43 female actors between the ages of 20 and 74,
coming from a variety of races and ethnicities (African American, Asian, Caucasian, Hispanic, and Unspecified).
Actors spoke from a selection of 12 sentences.
The sentences were presented using one of six different emotions (Anger, Disgust, Fear, Happy, Neutral, and Sad)
and four different emotion levels (Low, Medium, High, and Unspecified).
Participants rated the emotion and emotion levels based on the combined audiovisual presentation,
the video alone, and the audio alone. Due to the large number of ratings needed, this effort was crowd-sourced,
and a total of 2,443 participants each rated 90 unique clips: 30 audio, 30 visual, and 30 audiovisual.
95% of the clips have more than 7 ratings.
"""

_HOMEPAGE = "https://github.com/CheyneyComputerScience/CREMA-D"

_LICENSE = "ODbL"

_ROOT_DIR = "crema_d"
_DATA_URL = f"data/{_ROOT_DIR}.tar.gz"

_SENTENCE_MAP = {
    "IEO": "It's eleven o'clock",
    "TIE": "That is exactly what happened",
    "IOM": "I'm on my way to the meeting",
    "IWW": "I wonder what this is about",
    "TAI": "The airplane is almost full",
    "MTI": "Maybe tomorrow it will be cold",
    "IWL": "I would like a new alarm clock",
    "ITH": "I think I have a doctor's appointment",
    "DFA": "Don't forget a jacket",
    "ITS": "I think I've seen this before",
    "TSI": "The surface is slick",
    "WSI": "We'll stop in a couple of minutes",
}

_EMOTION_MAP = {
    "NEU": "neutral",
    "HAP": "happy",
    "SAD": "sad",
    "ANG": "anger",
    "FEA": "fear",
    "DIS": "disgust",
}

_INTENSITY_MAP = {
    "LO": "Low",
    "MD": "Medium",
    "HI": "High",
    "XX": "Unspecified",
    "X": "Unspecified",
}

_CLASS_NAMES = list(_EMOTION_MAP.values())
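
# For illustration: under the filename convention encoded above, a hypothetical
# file "1001_DFA_ANG_XX.wav" would decode to actor_id "1001",
# sentence "Don't forget a jacket", label "anger", and intensity "Unspecified".
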
class CremaDDataset(datasets.GeneratorBasedBuilder):
    """The CREMA-D dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        sampling_rate = 16_000
        features = datasets.Features(
            {
                "audio": datasets.Audio(sampling_rate=sampling_rate),
                "actor_id": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "intensity": datasets.Value("string"),
                "label": datasets.ClassLabel(names=_CLASS_NAMES),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_DATA_URL)
        # In streaming mode the archive is read on the fly via iter_archive,
        # so there is no locally extracted copy.
        local_extracted_archive = (
            dl_manager.extract(archive) if not dl_manager.is_streaming else None
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "audio_files": dl_manager.iter_archive(archive),
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive, audio_files):
        """Yield examples whose filenames follow the pattern
        4digitActorId_sentenceId_emotionId_emotionLevel (plus extension)."""
        id_ = 0
        for path, f in audio_files:
            # iter_archive yields (path, file object) pairs, so read the bytes
            # from the already-open file; this also works in streaming mode,
            # where no extracted copy exists on disk.
            audio_bytes = f.read()
            # Point "path" at the extracted file when one exists (non-streaming).
            if local_extracted_archive:
                path = os.path.join(local_extracted_archive, path)
            filename = os.path.basename(path)
            actor_id, sentence_id, emotion_id, emotion_level = filename.split(".")[0].split("_")
            # Keys must match the features declared in _info().
            yield id_, {
                "audio": {"path": path, "bytes": audio_bytes},
                "actor_id": actor_id,
                "sentence": _SENTENCE_MAP[sentence_id],
                "intensity": _INTENSITY_MAP[emotion_level],
                "label": _EMOTION_MAP[emotion_id],
            }
            id_ += 1
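

# A minimal usage sketch, assuming this script is saved as crema_d.py next to
# the data/crema_d.tar.gz archive and a datasets version that still supports
# loading local scripts (file names here are illustrative):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("crema_d.py", split="train")
#     print(ds[0]["sentence"], ds[0]["label"], ds[0]["intensity"])
#
# Streaming should also work, since _generate_examples reads the audio bytes
# straight from the archive:
#
#     ds = load_dataset("crema_d.py", split="train", streaming=True)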