"""EMT dataset."""

import os

import datasets


_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"

_LICENSE = "CC-BY-SA 4.0"

_CITATION = """
@article{EMTdataset2025,
    title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
    author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
    year={2025},
    eprint={2502.19260},
    archivePrefix={arXiv},
    primaryClass={cs.CV},
    url={https://arxiv.org/abs/2502.19260}
}
"""

_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
"""

_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz"

_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"

# Class id -> class name, as used in the ground-truth annotation files.
_GT_OBJECT_CLASSES = {
    0: "Pedestrian",
    1: "Cyclist",
    2: "Motorbike",
    3: "Small_motorised_vehicle",
    4: "Car",
    5: "Medium_vehicle",
    6: "Large_vehicle",
    7: "Bus",
    8: "Emergency_vehicle",
}

# Reverse mapping: class name -> class id, used when parsing annotation lines.
OBJECT_CLASSES = {v: k for k, v in _GT_OBJECT_CLASSES.items()}


class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32")),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_IMAGE_ARCHIVE_URL)
        annotation_paths = {
            "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
            "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, annotation_path):
        """Generate examples from the image archive and per-video annotation files."""
        # Parsed annotations, cached per video so each annotation file is read only once:
        # {video_name: {image_file_name: [object dicts]}}
        video_annotations = {}

        idx = 0
        # iter_archive yields (path, file_obj) pairs and can only be consumed once,
        # so images and annotations are matched in a single pass over the archive.
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)
            video_name = os.path.basename(os.path.dirname(file_path))

            if video_name not in video_annotations:
                frame_objects = {}
                ann_file = os.path.join(annotation_path, f"{video_name}.txt")
                if os.path.exists(ann_file):
                    with open(ann_file, "r", encoding="utf-8") as f:
                        for line in f:
                            parts = line.strip().split()
                            if len(parts) < 8:
                                continue

                            frame_id, track_id, class_name = parts[:3]
                            bbox = list(map(float, parts[4:8]))
                            class_id = OBJECT_CLASSES.get(class_name, -1)

                            frame_objects.setdefault(f"{frame_id}.jpg", []).append(
                                {
                                    "bbox": bbox,
                                    "class_id": class_id,
                                    "track_id": int(track_id),
                                    "class_name": class_name,
                                }
                            )
                video_annotations[video_name] = frame_objects

            objects = video_annotations[video_name].get(img_name)
            if objects:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": objects,
                }
                idx += 1
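

# Example usage (a minimal sketch, assuming this script is hosted as the loading script
# of the KuAvLab/EMT dataset repository; the repository id and split names below follow
# the URLs and SplitGenerators defined above, so adjust them if the hosting layout differs):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
#     example = dataset[0]
#     # "objects" is a Sequence feature, so it is returned as a dict of lists.
#     print(example["objects"]["class_name"], example["objects"]["bbox"])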