import os

import datasets

logger = datasets.logging.get_logger(__name__)
_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
_LICENSE = "CC-BY-SA 4.0"
_CITATION = """
@article{EMTdataset2025,
title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
year={2025},
eprint={2502.19260},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.19260}
}
"""
_DESCRIPTION = """\
A multi-task dataset for detection, tracking, prediction, and intention prediction.
This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
"""
_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
_TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
_TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
# Mapping from class name (as it appears in the annotation files) to class id.
# The lookup in `_generate_examples` is by name, so the dict is keyed by name.
_GT_OBJECT_CLASSES = {
    "Pedestrian": 0,
    "Cyclist": 1,
    "Motorbike": 2,
    "Small_motorised_vehicle": 3,
    "Car": 4,
    "Medium_vehicle": 5,
    "Large_vehicle": 6,
    "Bus": 7,
    "Emergency_vehicle": 8,
}
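
# Expected annotation line layout (an assumption inferred from the parser in
# `_generate_examples` below, not an official spec):
#
#   frame_id track_id class_name <unused> x1 y1 x2 y2 [extra fields...]
#
# i.e. whitespace-separated fields where parts[0:3] carry the frame id, track
# id, and class name, parts[3] is skipped, and parts[4:8] hold the bounding box.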
class EMT(datasets.GeneratorBasedBuilder):
"""EMT dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="emt",
description="Training split of the EMT dataset",
version=datasets.Version("1.0.0"),
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"objects": datasets.Sequence(
{
"bbox": datasets.Sequence(datasets.Value("float32"), length=4),
"class_id": datasets.Value("int32"),
"track_id": datasets.Value("int32"),
"class_name": datasets.Value("string"),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        """Download (if not cached) and prepare dataset splits."""
        image_urls = {
            "train": _TRAIN_IMAGE_ARCHIVE_URL,
            "test": _TEST_IMAGE_ARCHIVE_URL,
        }
        annotation_urls = {
            "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
            "test": _TEST_ANNOTATION_ARCHIVE_URL,
        }

        # Image archives are only downloaded (not extracted) so they can be
        # streamed with `dl_manager.iter_archive`; the annotation archives are
        # small, so they are downloaded and extracted.
        image_archives = dl_manager.download(image_urls)
        extracted_annotations = dl_manager.download_and_extract(annotation_urls)

        # Annotation files live in an `annotations/<split>` subdirectory of
        # each extracted archive.
        train_annotation_path = os.path.join(extracted_annotations["train"], "annotations", "train")
        test_annotation_path = os.path.join(extracted_annotations["test"], "annotations", "test")

        # Return both splits; Hugging Face filters based on the user request.
        # The gen_kwargs keys must match the `_generate_examples` parameters.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_archives["train"]),
                    "annotation_path": train_annotation_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_archives["test"]),
                    "annotation_path": test_annotation_path,
                },
            ),
        ]
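
    # `dl_manager.iter_archive` yields `(path_inside_archive, file_object)`
    # pairs without unpacking the tarball to disk; `_generate_examples`
    # consumes these pairs directly as its `images` argument.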
    def _generate_examples(self, images, annotation_path):
        """Generate dataset examples by matching images to their corresponding annotations."""
        annotations = {}

        # Sanity-check that the annotation path belongs to a known split.
        if "train" not in annotation_path and "test" not in annotation_path:
            raise ValueError(f"Unknown annotation path: {annotation_path}")

        ann_dir = annotation_path
        logger.info(f"Looking for annotations in: {ann_dir}")

        # Check that the annotation directory exists
        if not os.path.exists(ann_dir):
            raise FileNotFoundError(f"Annotation directory does not exist: {ann_dir}")
        # Read every annotation file; each file corresponds to one video.
        for ann_file in os.listdir(ann_dir):
            video_name = os.path.splitext(ann_file)[0]  # Annotation file name matches the video folder name
            ann_path = os.path.join(ann_dir, ann_file)
            if os.path.isdir(ann_path):
                continue  # Skip directories

            logger.info(f"Processing annotation file: {ann_path}")
            with open(ann_path, "r", encoding="utf-8") as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) < 8:
                        continue  # Skip malformed lines
                    # Fields: frame_id, track_id, class_name; parts[3] is
                    # intentionally skipped and parts[4:8] hold the bbox.
                    frame_id, track_id, class_name = parts[:3]
                    bbox = list(map(float, parts[4:8]))
                    class_id = _GT_OBJECT_CLASSES.get(class_name, -1)  # -1 for unknown class names
                    img_name = f"{frame_id}.jpg"
# Store annotation in a dictionary
key = f"{video_name}/{img_name}"
if key not in annotations:
annotations[key] = []
annotations[key].append(
{
"bbox": bbox,
"class_id": class_id,
"track_id": int(track_id),
"class_name": class_name,
}
)
# Yield dataset entries
idx = 0
for file_path, file_obj in images:
img_name = os.path.basename(file_path)
video_name = os.path.basename(os.path.dirname(file_path)) # Match the video folder
key = f"{video_name}/{img_name}"
if key in annotations:
yield idx, {
"image": {"path": file_path, "bytes": file_obj.read()},
"objects": annotations[key],
}
idx += 1
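
# Example usage (a minimal sketch; assumes this script is hosted as the loading
# script of the KuAvLab/EMT dataset repository and that the installed
# `datasets` version still supports script-based datasets):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
#   example = ds[0]
#   example["image"]    # decoded PIL image
#   example["objects"]  # dict of parallel lists: bbox, class_id, track_id, class_name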