"""EMT dataset."""
import os
import datasets
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
year={2025},
eprint={2502.19260},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
"""
# Image archive URL
_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz"
# Annotations URL (organized in train/test subfolders)
_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
# "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations"
_GT_OBJECT_CLASSES = {
0: "Pedestrian",
1: "Cyclist",
2: "Motorbike",
3: "Small_motorised_vehicle",
4: "Car",
5: "Medium_vehicle",
6: "Large_vehicle",
7: "Bus",
8: "Emergency_vehicle",
}
# Reverse mapping from class name to numeric ID, used when parsing annotation files.
OBJECT_CLASSES = {v: k for k, v in _GT_OBJECT_CLASSES.items()}
class EMT(datasets.GeneratorBasedBuilder):
"""EMT dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"objects": datasets.Sequence(
{
"bbox": datasets.Sequence(datasets.Value("float32")),
"class_id": datasets.Value("int32"),
"track_id": datasets.Value("int32"),
"class_name": datasets.Value("string"),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
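    # Illustrative sketch only: a single generated example is expected to look
    # roughly like the dict below (the path and values here are hypothetical).
    #
    #   {
    #       "image": {"path": "video_1112/000001.jpg", "bytes": b"..."},
    #       "objects": [
    #           {"bbox": [100.0, 200.0, 150.0, 260.0],
    #            "class_id": 4,            # "Car" in _GT_OBJECT_CLASSES
    #            "track_id": 7,
    #            "class_name": "Car"},
    #       ],
    #   }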
def _split_generators(self, dl_manager):
archive_path = dl_manager.download(_IMAGE_ARCHIVE_URL)
annotation_paths = {
"train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
"test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"annotation_path": annotation_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"annotation_path": annotation_paths["test"],
},
),
]
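    # Assumed data layout, inferred from the parsing code below rather than from
    # the dataset card: frames are stored in the archive as
    # "<video_name>/<frame_id>.jpg" (e.g. "video_1112/000001.jpg"), and each
    # split's annotation folder contains one "<video_name>.txt" file whose
    # whitespace-separated lines start with frame_id, track_id and class_name
    # and carry four bounding-box values in columns 5-8.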
    def _generate_examples(self, images, annotation_path):
        """Generate examples by streaming the image archive and matching annotations."""
        # Parsed annotations, cached per video: video_name -> {image_name -> list of objects}.
        # The iterator returned by dl_manager.iter_archive can only be consumed once,
        # so everything is done in a single pass over the archive.
        video_annotations = {}

        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)  # e.g., "000001.jpg"
            video_name = os.path.basename(os.path.dirname(file_path))  # e.g., "video_1112"

            # Parse the annotation file for this video the first time it is seen.
            if video_name not in video_annotations:
                frames = {}
                ann_file = os.path.join(annotation_path, f"{video_name}.txt")
                if os.path.exists(ann_file):
                    with open(ann_file, "r", encoding="utf-8") as f:
                        for line in f:
                            parts = line.strip().split()
                            if len(parts) < 8:  # Skip malformed lines
                                continue
                            frame_id, track_id, class_name = parts[:3]
                            bbox = list(map(float, parts[4:8]))  # Extract bounding box
                            class_id = OBJECT_CLASSES.get(class_name, -1)  # Class name -> numeric ID
                            frames.setdefault(f"{frame_id}.jpg", []).append(
                                {
                                    "bbox": bbox,
                                    "class_id": class_id,
                                    "track_id": int(track_id),
                                    "class_name": class_name,
                                }
                            )
                video_annotations[video_name] = frames

            # Yield only images that have at least one annotated object.
            objects = video_annotations[video_name].get(img_name)
            if objects:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": objects,
                }
                idx += 1
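# Usage sketch (assumes this script is loaded from the KuAvLab/EMT repository on
# the Hugging Face Hub; the exact repo id and split names may differ):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
#   sample = ds[0]
#   sample["image"]    # decoded PIL image
#   sample["objects"]  # bounding boxes, class ids, track ids and class names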