"""EMT dataset."""

import os

import datasets

_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"

_LICENSE = "CC-BY-SA 4.0"

_CITATION = """
@article{EMTdataset2025,
  title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
  author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
  year={2025},
  eprint={2502.19260},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2502.19260}
}
"""

_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
It contains 34,386 frames collected over 57 minutes of driving, annotated for
detection and tracking.
"""

_ANNOTATION_REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/labels"

_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
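
# Mapping from annotation class names to integer ids. The original script
# references _GT_OBJECT_CLASSES without defining it, so the names and id
# order below are an assumption for this sketch; check them against the
# official EMT label files before relying on the ids.
_GT_OBJECT_CLASSES = {
    "Pedestrian": 0,
    "Cyclist": 1,
    "Motorbike": 2,
    "Small_motorised_vehicle": 3,
    "Car": 4,
    "Medium_vehicle": 5,
    "Large_vehicle": 6,
    "Bus": 7,
    "Emergency_vehicle": 8,
}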


class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full_size",
            description="All images are in their original size.",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
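
    # Access-pattern note (illustrative values only): because "objects" is a
    # datasets.Sequence over a dict of features, loaded examples come back
    # column-oriented, e.g. example["objects"] looks like
    #   {"bbox": [[604.0, 393.0, 646.0, 430.0]], "class_id": [4],
    #    "track_id": [12], "class_name": ["Car"]}
    # rather than a list of per-object dicts.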

    def _split_generators(self, dl_manager):
        """Download and extract train/test images and annotations."""
        image_paths = {
            "train": dl_manager.download_and_extract(_TRAIN_IMAGE_ARCHIVE_URL),
            "test": dl_manager.download_and_extract(_TEST_IMAGE_ARCHIVE_URL),
        }

        annotation_paths = {
            "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
            "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_paths["train"]),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_paths["test"]),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]
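
    # Expected on-disk layout after extraction (inferred from the path
    # handling in _generate_examples, not taken from the EMT docs):
    #
    #   <image archive>/<video_name>/<frame_id>.jpg
    #   <annotation_path>/<video_name>.txt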

    def _generate_examples(self, images, annotation_path):
        """Generate dataset examples by matching images to their annotations.

        Annotation files are named <video_name>.txt, one object per line. The
        parsing assumes whitespace-separated fields laid out as
        ``frame_id track_id class_name <unused> x1 y1 x2 y2 ...`` (the field
        at index 3 is skipped, matching the original slicing).
        """

        def _load_annotations(ann_file):
            """Parse one annotation file, grouping objects by frame image name."""
            per_frame = {}
            with open(ann_file, "r", encoding="utf-8") as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) < 8:
                        continue
                    frame_id, track_id, class_name = parts[:3]
                    bbox = list(map(float, parts[4:8]))
                    class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
                    per_frame.setdefault(f"{frame_id}.jpg", []).append(
                        {
                            "bbox": bbox,
                            "class_id": class_id,
                            "track_id": int(track_id),
                            "class_name": class_name,
                        }
                    )
            return per_frame

        # `images` comes from dl_manager.iter_archive, which can only be
        # consumed once; the original code iterated it twice, so the second
        # pass (the one meant to yield examples) saw an exhausted generator.
        # Here the archive is walked in a single pass, and parsed annotation
        # files are cached per video instead of being re-read for every frame.
        annotations = {}
        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)
            video_name = os.path.basename(os.path.dirname(file_path))
            ann_file = os.path.join(annotation_path, f"{video_name}.txt")

            if not os.path.exists(ann_file):
                continue
            if ann_file not in annotations:
                annotations[ann_file] = _load_annotations(ann_file)

            yield idx, {
                "image": {"path": file_path, "bytes": file_obj.read()},
                "objects": annotations[ann_file].get(img_name, []),
            }
            idx += 1
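

# Usage sketch (assumptions: this script is saved locally as emt.py, the URLs
# above are reachable, and the installed `datasets` version still supports
# dataset loading scripts):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("emt.py", name="full_size", split="train")
#     print(ds[0]["objects"]["class_name"])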