"""EMT dataset."""
import os
import datasets
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
      title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
      author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
      year={2025},
      eprint={2502.19260},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, trajectory prediction, and intention prediction.
The dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
"""
# Annotation repository
_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
# Tar file URLs for images
_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"

# Ground-truth class-name -> id mapping used in _generate_examples. The names below are
# assumed from the agent taxonomy described in the EMT paper; adjust them if the
# annotation files use different labels. Unknown names fall back to class_id -1.
_GT_OBJECT_CLASSES = {
    "Pedestrian": 0,
    "Cyclist": 1,
    "Motorbike": 2,
    "Small_motorised_vehicle": 3,
    "Car": 4,
    "Medium_vehicle": 5,
    "Large_vehicle": 6,
    "Bus": 7,
    "Emergency_vehicle": 8,
}

class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full_size",
            description="All images are in their original size.",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download train/test image archives and annotations."""
        image_urls = {
            "train": _TRAIN_IMAGE_ARCHIVE_URL,
            "test": _TEST_IMAGE_ARCHIVE_URL,
        }
        # Per-split annotation folders in the dataset repository
        annotation_urls = {
            "train": _ANNOTATION_REPO + "/train/",
            "test": _ANNOTATION_REPO + "/test/",
        }
        # Download the image archives, then iterate over their members lazily
        # (iter_archive expects a downloaded archive, not a raw URL)
        images = {
            "train": dl_manager.iter_archive(dl_manager.download(image_urls["train"])),
            "test": dl_manager.iter_archive(dl_manager.download(image_urls["test"])),
        }
        # Download and extract the annotation files
        annotations = {
            "train": dl_manager.download_and_extract(annotation_urls["train"]),
            "test": dl_manager.download_and_extract(annotation_urls["test"]),
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images["train"],
                    "annotation_path": annotations["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": images["test"],
                    "annotation_path": annotations["test"],
                },
            ),
        ]

    def _generate_examples(self, images, annotation_path):
        """Generate examples by matching images to their corresponding annotations."""
        annotations = {}

        # Load all annotations into memory, keyed by "<video_name>/<frame_id>.jpg"
        for ann_file in os.listdir(annotation_path):
            video_name = os.path.splitext(ann_file)[0]  # Annotation file name == video folder name
            ann_path = os.path.join(annotation_path, ann_file)

            with open(ann_path, "r", encoding="utf-8") as f:
                for line in f:
                    # Each GT line is expected to hold at least 8 whitespace-separated
                    # fields: frame id, track id, class name, and (in fields 5-8) the
                    # bounding box coordinates.
                    parts = line.strip().split()
                    if len(parts) < 8:
                        continue

                    frame_id, track_id, class_name = parts[:3]
                    bbox = list(map(float, parts[4:8]))
                    class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
                    img_name = f"{frame_id}.jpg"

                    # Store annotation in a dictionary keyed by video/frame
                    key = f"{video_name}/{img_name}"
                    annotations.setdefault(key, []).append(
                        {
                            "bbox": bbox,
                            "class_id": class_id,
                            "track_id": int(track_id),
                            "class_name": class_name,
                        }
                    )

        # Yield dataset entries
        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)
            video_name = os.path.basename(os.path.dirname(file_path))  # Match the video folder
            key = f"{video_name}/{img_name}"

            if key in annotations:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": annotations[key],
                }
                idx += 1
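
# Usage sketch (assumption): this script acts as the loading script of the KuAvLab/EMT
# dataset repository referenced by the URLs above, so it can be exercised as shown below.
# The repo id, config name, and split names come from the definitions in this file;
# trust_remote_code=True is needed because the dataset relies on a loading script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the training split and inspect the first example's annotations.
    ds = load_dataset("KuAvLab/EMT", "full_size", split="train", trust_remote_code=True)
    example = ds[0]
    print(example["image"])            # decoded image (PIL.Image.Image)
    print(example["objects"]["bbox"])  # list of 4-value boxes (assumed [x1, y1, x2, y2])
    print(example["objects"]["class_name"])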