"""EMT dataset."""
import os
import datasets
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
year={2025},
eprint={2502.19260},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection + tracking.
"""
# Image archive URL
_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz"
# Annotations URL (organized in train/test subfolders)
_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
# "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations"
_GT_OBJECT_CLASSES = {
0: "Pedestrian",
1: "Cyclist",
2: "Motorbike",
3: "Small_motorised_vehicle",
4: "Car",
5: "Medium_vehicle",
6: "Large_vehicle",
7: "Bus",
8: "Emergency_vehicle",
}
# Reverse mapping from class name to numeric ID, used when parsing annotations.
OBJECT_CLASSES = {v: k for k, v in _GT_OBJECT_CLASSES.items()}
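# Usage sketch (assumes this script is hosted as KuAvLab/EMT on the Hugging
# Face Hub, matching _IMAGE_ARCHIVE_URL above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
#     sample = ds[0]
#     sample["image"]    # decoded PIL image
#     sample["objects"]  # dict of lists: "bbox", "class_id", "track_id", "class_name"
#                        # (a datasets.Sequence of a dict transposes to a dict of lists)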
class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32")),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_IMAGE_ARCHIVE_URL)
        annotation_paths = {
            "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
            "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
        }
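        # Both splits share one image archive; an image lands in a split when
        # its video has an annotation file under that split's folder (see
        # _generate_examples below).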
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]
    def _generate_examples(self, images, annotation_path):
        """Generate examples from annotations and image archive."""
        # dl_manager.iter_archive yields each archive member exactly once, so
        # annotations must be matched and examples emitted in a single pass.
        # Parsed per-video annotation files are cached here, keyed by video
        # name, then by image file name within the video.
        annotations = {}
        idx = 0
        for file_path, file_obj in images:
            img_name = os.path.basename(file_path)  # e.g., "000001.jpg"
            video_name = os.path.basename(os.path.dirname(file_path))  # e.g., "video_1112"
            # Parse this video's annotation file on first encounter.
            if video_name not in annotations:
                annotations[video_name] = {}
                ann_file = os.path.join(annotation_path, f"{video_name}.txt")
                if os.path.exists(ann_file):
                    with open(ann_file, "r", encoding="utf-8") as f:
                        for line in f:
                            parts = line.strip().split()
                            if len(parts) < 8:  # Skip malformed lines
                                continue
                            frame_id, track_id, class_name = parts[:3]
                            bbox = list(map(float, parts[4:8]))  # Bounding box coordinates
                            # Map class name to its numeric ID; -1 if unknown.
                            class_id = OBJECT_CLASSES.get(class_name, -1)
                            frame_key = f"{frame_id}.jpg"
                            annotations[video_name].setdefault(frame_key, []).append(
                                {
                                    "bbox": bbox,
                                    "class_id": class_id,
                                    "track_id": int(track_id),
                                    "class_name": class_name,
                                }
                            )
            # Only images whose video is annotated in this split are yielded.
            objects = annotations[video_name].get(img_name)
            if objects:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": objects,
                }
                idx += 1
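
# Minimal parsing sanity check, runnable without downloading the dataset.
# The annotation line below is hypothetical; it only mirrors the field layout
# assumed by _generate_examples (frame_id, track_id, class_name, one skipped
# field, then x1 y1 x2 y2).
if __name__ == "__main__":
    _line = "000001 5 Car 0 10.0 20.0 110.0 220.0"
    _parts = _line.strip().split()
    assert len(_parts) >= 8
    assert OBJECT_CLASSES.get(_parts[2], -1) == 4  # "Car" -> 4
    assert list(map(float, _parts[4:8])) == [10.0, 20.0, 110.0, 220.0]
    print("annotation-line parsing sanity check passed")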