"""EMT dataset."""
import os
import datasets
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
    title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
    author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
    year={2025},
    eprint={2502.19260},
    archivePrefix={arXiv},
    primaryClass={cs.CV},
    url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
It contains 34,386 annotated frames collected over 57 minutes of driving, with detection and tracking annotations.
"""
# Image archive URL
_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz"
# Annotations URL (organized in train/test subfolders)
_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
# "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations"
_GT_OBJECT_CLASSES = {
    0: "Pedestrian",
    1: "Cyclist",
    2: "Motorbike",
    3: "Small_motorised_vehicle",
    4: "Car",
    5: "Medium_vehicle",
    6: "Large_vehicle",
    7: "Bus",
    8: "Emergency_vehicle",
}
# Reverse mapping (class name -> numeric ID), used when parsing annotation files.
OBJECT_CLASSES = {v: k for k, v in _GT_OBJECT_CLASSES.items()}
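# Example lookup (values from the table above): OBJECT_CLASSES["Car"] == 4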


class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32")),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
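    # Note: datasets.Sequence over a dict of sub-features is surfaced as a
    # dict of lists when examples are loaded, so one record looks like
    # (values illustrative):
    #   {"image": <PIL.Image>,
    #    "objects": {"bbox": [[x1, y1, x2, y2], ...], "class_id": [...],
    #                "track_id": [...], "class_name": [...]}}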

    def _split_generators(self, dl_manager):
        # The image archive is downloaded once and iterated lazily per split;
        # the per-split annotation files decide which frames each split keeps.
        archive_path = dl_manager.download(_IMAGE_ARCHIVE_URL)
        annotation_paths = {
            "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
            "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, annotation_path):
        """Generate examples from annotations and image archive."""
        # Load annotation files and group objects by the frame they belong to.
        annotations = {}
        for root, _, files in os.walk(annotation_path):
            for file in files:
                with open(os.path.join(root, file), "r", encoding="utf-8") as f:
                    for line in f:
                        parts = line.strip().split()
                        frame_id, track_id, class_name = parts[:3]
                        bbox = list(map(float, parts[4:8]))  # Bounding box coordinates
                        # Map the class name to its numeric ID; -1 if unknown.
                        class_id = OBJECT_CLASSES.get(class_name, -1)
                        img_path = f"{frame_id}.jpg"
                        annotations.setdefault(img_path, []).append(
                            {
                                "bbox": bbox,
                                "class_id": class_id,
                                "track_id": int(track_id),
                                "class_name": class_name,
                            }
                        )
        # Yield one entry per image in the archive that has annotations.
        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)
            if img_name in annotations:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": annotations[img_name],
                }
                idx += 1
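
# --- Usage sketch (illustrative, not part of the loading script) ---
# Assuming this script is served from the KuAvLab/EMT dataset repository (the
# repo id below mirrors the URLs above but is otherwise an assumption), it
# would typically be consumed like this:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
#   example = ds[0]
#   print(example["objects"]["class_name"])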