"""EMT dataset."""
import os
import datasets
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
year={2025},
eprint={2502.19260},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with detection and tracking annotations.
"""
# Annotation repository
_ANNOTATION_REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/labels"
# Tar file URLs for images
_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
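
# Mapping from annotation class names to the integer ids stored in "class_id".
# NOTE: the names and ids below are assumed placeholders; replace them with the
# official EMT label set if it differs. Unknown names fall back to -1 in
# _generate_examples.
_GT_OBJECT_CLASSES = {
    "Pedestrian": 0,
    "Cyclist": 1,
    "Motorbike": 2,
    "Car": 3,
    "Bus": 4,
    "Truck": 5,
}
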
class EMT(datasets.GeneratorBasedBuilder):
"""EMT dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="full_size",
description="All images are in their original size.",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"objects": datasets.Sequence(
{
"bbox": datasets.Sequence(datasets.Value("float32"), length=4),
"class_id": datasets.Value("int32"),
"track_id": datasets.Value("int32"),
"class_name": datasets.Value("string"),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
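
    # Shape of a single yielded example (values are illustrative only; the bbox
    # coordinate convention follows whatever the annotation files use):
    #   {
    #       "image": {"path": "video_1112/000001.jpg", "bytes": b"..."},
    #       "objects": [
    #           {"bbox": [10.0, 20.0, 110.0, 220.0], "class_id": 3,
    #            "track_id": 7, "class_name": "Car"},
    #       ],
    #   }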
def _split_generators(self, dl_manager):
"""Download and extract train/test images and annotations."""
        # Download (but do not extract) the image archives: iter_archive below
        # expects a path to the .tar.gz file itself, not an extracted directory.
        image_paths = {
            "train": dl_manager.download(_TRAIN_IMAGE_ARCHIVE_URL),
            "test": dl_manager.download(_TEST_IMAGE_ARCHIVE_URL),
        }
        # The annotation folders are expected to contain one "<video_name>.txt"
        # file per video clip.
        annotation_paths = {
            "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
            "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
        }
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(image_paths["train"]),
"annotation_path": annotation_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"images": dl_manager.iter_archive(image_paths["test"]),
"annotation_path": annotation_paths["test"],
},
),
]
    def _generate_examples(self, images, annotation_path):
        """Generate dataset examples by matching images to their corresponding annotations.

        The archive iterator is consumed in a single pass, and each per-video
        annotation file is parsed only once and cached.
        """
        # video_name -> {image_name: [object dicts]}, or None if the video has
        # no annotation file.
        annotations_by_video = {}
        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)  # e.g., "000001.jpg"
            video_name = os.path.basename(os.path.dirname(file_path))  # e.g., "video_1112"
            if video_name not in annotations_by_video:
                annotations_by_video[video_name] = self._load_annotations(
                    os.path.join(annotation_path, f"{video_name}.txt")
                )
            frame_annotations = annotations_by_video[video_name]
            if frame_annotations is None:
                # No annotation file for this video; skip its images.
                continue
            yield idx, {
                "image": {"path": file_path, "bytes": file_obj.read()},
                "objects": frame_annotations.get(img_name, []),
            }
            idx += 1

    @staticmethod
    def _load_annotations(ann_file):
        """Parse one per-video annotation file into {image_name: [object dicts]}."""
        if not os.path.exists(ann_file):
            return None
        per_frame = {}
        with open(ann_file, "r", encoding="utf-8") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) < 8:
                    continue
                frame_id, track_id, class_name = parts[:3]
                bbox = list(map(float, parts[4:8]))
                class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
                per_frame.setdefault(f"{frame_id}.jpg", []).append(
                    {
                        "bbox": bbox,
                        "class_id": class_id,
                        "track_id": int(track_id),
                        "class_name": class_name,
                    }
                )
        return per_frame
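

# Example usage (a minimal sketch; assumes this script is loaded from the
# "KuAvLab/EMT" dataset repository referenced by the archive URLs above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("KuAvLab/EMT", "full_size", split="train", trust_remote_code=True)
#     sample = ds[0]
#     print(sample["image"], sample["objects"])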