import os

import datasets

logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"

_CITATION = """
@article{EMTdataset2025,
      title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
      author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
      year={2025},
      eprint={2502.19260},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2502.19260}
}
"""

|
|
_DESCRIPTION = """\ |
|
A multi-task dataset for detection, tracking, prediction, and intention prediction. |
|
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking. |
|
""" |
|
|
|
_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"

_TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
_TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"

# Mapping from ground-truth class names to the integer ids used in "class_id".
_GT_OBJECT_CLASSES = {
    "Pedestrian": 0,
    "Cyclist": 1,
    "Motorbike": 2,
    "Small_motorised_vehicle": 3,
    "Car": 4,
    "Medium_vehicle": 5,
    "Large_vehicle": 6,
    "Bus": 7,
    "Emergency_vehicle": 8,
}


class EMT(datasets.GeneratorBasedBuilder):
    """EMT detection and tracking dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="emt",
            description="EMT dataset with detection and tracking annotations (train and test splits)",
            version=datasets.Version("1.0.0"),
        ),
    ]

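    # With a single config, the datasets library selects it automatically;
    # naming the default here just makes that explicit.
    DEFAULT_CONFIG_NAME = "emt"
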
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

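    # For reference, one example produced by this builder looks roughly like
    # the following (illustrative values, not drawn from the dataset). Note
    # that datasets.Sequence over a dict of features is surfaced as a dict of
    # aligned lists rather than a list of dicts:
    #
    #   {
    #       "image": <PIL.Image.Image>,
    #       "objects": {
    #           "bbox": [[100.0, 150.0, 220.0, 300.0]],
    #           "class_id": [4],
    #           "track_id": [12],
    #           "class_name": ["Car"],
    #       },
    #   }
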
    def _split_generators(self, dl_manager):
        """Download (if not cached) and prepare dataset splits."""
        image_urls = {
            "train": _TRAIN_IMAGE_ARCHIVE_URL,
            "test": _TEST_IMAGE_ARCHIVE_URL,
        }
        annotation_urls = {
            "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
            "test": _TEST_ANNOTATION_ARCHIVE_URL,
        }

        # download_and_extract caches the archives and returns the extraction
        # directories, so repeated builds reuse the same download.
        extracted_images = dl_manager.download_and_extract(image_urls)
        extracted_annotations = dl_manager.download_and_extract(annotation_urls)

        # The annotation archives unpack to EMT/annotations/<split>.
        train_annotation_path = os.path.join(extracted_annotations["train"], "EMT", "annotations", "train")
        test_annotation_path = os.path.join(extracted_annotations["test"], "EMT", "annotations", "test")

        train_image_path = extracted_images["train"]
        test_image_path = extracted_images["test"]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_dir": train_image_path,
                    "annotation_path": train_annotation_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "image_dir": test_image_path,
                    "annotation_path": test_annotation_path,
                },
            ),
        ]

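    # Each SplitGenerator's gen_kwargs above are passed verbatim to
    # _generate_examples below, so both splits share a single generation path.
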
    def _generate_examples(self, image_dir, annotation_path):
        """Generate dataset examples by matching images to their corresponding annotations."""
        annotations = {}

        logger.info("Looking for annotations in: %s", annotation_path)

        if not os.path.exists(annotation_path):
            raise FileNotFoundError(f"Annotation directory does not exist: {annotation_path}")

        # Build a lookup from "<video_name>/<image_name>" to the list of
        # objects annotated in that frame. Each annotation file corresponds to
        # one video; each line holds whitespace-separated fields with the
        # frame id, track id, and class name in the first three fields and the
        # bounding box (x1, y1, x2, y2) in parts[6:10].
        for ann_file in os.listdir(annotation_path):
            video_name = os.path.splitext(ann_file)[0]
            ann_path = os.path.join(annotation_path, ann_file)
            if os.path.isdir(ann_path):
                continue

            logger.info("Processing annotation file: %s", ann_path)

            with open(ann_path, "r", encoding="utf-8") as f:
                for line in f:
                    parts = line.strip().split()
                    # The bbox occupies parts[6:10], so a valid line needs at
                    # least ten fields.
                    if len(parts) < 10:
                        continue

                    frame_id, track_id, class_name = parts[:3]
                    bbox = list(map(float, parts[6:10]))
                    class_id = _GT_OBJECT_CLASSES.get(class_name, -1)  # -1 for unknown classes
                    img_name = f"{frame_id}.jpg"

                    key = f"{video_name}/{img_name}"
                    annotations.setdefault(key, []).append(
                        {
                            "bbox": bbox,
                            "class_id": class_id,
                            "track_id": int(track_id),
                            "class_name": class_name,
                        }
                    )

        # Walk the image tree and yield every image that has annotations.
        idx = 0
        for root, _, files in os.walk(image_dir):
            for file_name in files:
                if not file_name.endswith((".jpg", ".png")):
                    continue

                file_path = os.path.join(root, file_name)
                video_name = os.path.basename(root)
                key = f"{video_name}/{file_name}"

                if key in annotations:
                    with open(file_path, "rb") as img_file:
                        yield idx, {
                            "image": {"path": file_path, "bytes": img_file.read()},
                            "objects": annotations[key],
                        }
                    idx += 1
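

# Minimal manual smoke test: a sketch only, assuming this file is used as a
# local loading script for datasets.load_dataset. Recent versions of the
# datasets library require trust_remote_code=True for script-based datasets;
# older versions do not accept the argument. Running this downloads the full
# archives, so it is intended for one-off manual checks.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, split="train", trust_remote_code=True)
    first = ds[0]
    # "objects" is a dict of aligned lists, one entry per annotated object.
    print(first["objects"]["class_name"])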
|