Murad Mebrahtu committed on
Commit
88e3654
·
1 Parent(s): 495e513

Added annotations

Browse files
Files changed (1) hide show
  1. emt.py +267 -0
emt.py CHANGED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # """EMT dataset."""
2
+
3
+ # import os
4
+ # import json
5
+
6
+ # import datasets
7
+
8
+
9
+ # _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
10
+
11
+ # _LICENSE = "CC-BY-SA 4.0"
12
+
13
+ # _CITATION = """
14
+ # @article{EMTdataset2025,
15
+ # title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
16
+ # author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
17
+ # year={2025},
18
+ # eprint={2502.19260},
19
+ # archivePrefix={arXiv},
20
+ # primaryClass={cs.CV},
21
+ # url={https://arxiv.org/abs/2502.19260}
22
+ # }
23
+ # """
24
+
25
+ # _DESCRIPTION = """\
26
+ # A multi-task dataset for detection, tracking, prediction, and intention prediction.
27
+ # This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection + tracking.",
28
+
29
+ # """
30
+
31
+
32
+ # _LABEL_MAP = [
33
+ # 'n01440764',
34
+ # 'n02102040',
35
+ # 'n02979186',
36
+ # 'n03000684',
37
+ # 'n03028079',
38
+ # 'n03394916',
39
+ # 'n03417042',
40
+ # 'n03425413',
41
+ # 'n03445777',
42
+ # 'n03888257',
43
+ # ]
44
+
45
+ # # _REPO = "https://huggingface.co/datasets/frgfm/imagenette/resolve/main/metadata"
46
+ # _REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/labels"
47
+
48
+
49
+
50
+ # class EMTConfig(datasets.BuilderConfig):
51
+ # """BuilderConfig for EMT."""
52
+
53
+ # def __init__(self, data_url, metadata_urls, **kwargs):
54
+ # """BuilderConfig for EMT.
55
+ # Args:
56
+ # data_url: `string`, url to download the zip file from.
57
+ # matadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
58
+ # **kwargs: keyword arguments forwarded to super.
59
+ # """
60
+ # super(EMTConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
61
+ # self.data_url = data_url
62
+ # self.metadata_urls = metadata_urls
63
+
64
+
65
+ # class EMT(datasets.GeneratorBasedBuilder):
66
+ # """Imagenette dataset."""
67
+
68
+ # BUILDER_CONFIGS = [
69
+ # EMTConfig(
70
+ # name="full_size",
71
+ # description="All images are in their original size.",
72
+ # data_url="https://huggingface.co/datasets/KuAvLab/EMT/blob/main/emt_images.tar.gz",
73
+ # metadata_urls={
74
+ # "train": f"{_REPO}/train/",
75
+ # "test": f"{_REPO}/test/",
76
+ # },
77
+ # )
78
+ # ]
79
+
80
+ # def _info(self):
81
+ # return datasets.DatasetInfo(
82
+ # description=_DESCRIPTION + self.config.description,
83
+ # features=datasets.Features(
84
+ # {
85
+ # "image": datasets.Image(),
86
+ # "label": datasets.ClassLabel(
87
+ # names=[
88
+ # "bbox",
89
+ # "class_id",
90
+ # "track_id",
91
+ # "class_name",
92
+
93
+ # ]
94
+ # ),
95
+ # }
96
+ # ),
97
+ # supervised_keys=None,
98
+ # homepage=_HOMEPAGE,
99
+ # license=_LICENSE,
100
+ # citation=_CITATION,
101
+ # )
102
+
103
+ # def _split_generators(self, dl_manager):
104
+ # archive_path = dl_manager.download(self.config.data_url)
105
+ # metadata_paths = dl_manager.download(self.config.metadata_urls)
106
+ # archive_iter = dl_manager.iter_archive(archive_path)
107
+ # return [
108
+ # datasets.SplitGenerator(
109
+ # name=datasets.Split.TRAIN,
110
+ # gen_kwargs={
111
+ # "images": archive_iter,
112
+ # "metadata_path": metadata_paths["train"],
113
+ # },
114
+ # ),
115
+ # datasets.SplitGenerator(
116
+ # name=datasets.Split.TEST,
117
+ # gen_kwargs={
118
+ # "images": os.path.join(self.config.data_url, "test"),
119
+ # "metadata_path": metadata_paths["test"],
120
+ # },
121
+ # ),
122
+ # ]
123
+
124
+ # def _generate_examples(self, images, metadata_path):
125
+ # with open(metadata_path, encoding="utf-8") as f:
126
+ # files_to_keep = set(f.read().split("\n"))
127
+ # idx = 0
128
+ # for file_path, file_obj in images:
129
+ # if file_path in files_to_keep:
130
+ # label = _LABEL_MAP.index(file_path.split("/")[-2])
131
+ # yield idx, {
132
+ # "image": {"path": file_path, "bytes": file_obj.read()},
133
+ # "label": label,
134
+ # }
135
+ # idx += 1
136
+
137
+ """EMT dataset."""
138
+
139
+ import os
140
+ import json
141
+ import pandas as pd
142
+ import datasets
143
+
144
+ _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
145
+ _LICENSE = "CC-BY-SA 4.0"
146
+
147
+ _CITATION = """
148
+ @article{EMTdataset2025,
149
+ title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
150
+ author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
151
+ year={2025},
152
+ eprint={2502.19260},
153
+ archivePrefix={arXiv},
154
+ primaryClass={cs.CV},
155
+ url={https://arxiv.org/abs/2502.19260}
156
+ }
157
+ """
158
+
159
+ _DESCRIPTION = """\
160
+ A multi-task dataset for detection, tracking, prediction, and intention prediction.
161
+ This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection + tracking.",
162
+ """
163
+
164
+ _REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations"
165
+
166
class EMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the EMT dataset."""

    def __init__(self, data_url, annotation_url, **kwargs):
        """Create an EMT builder configuration.

        Args:
            data_url: `string`, URL to download the image archive (.tar file).
            annotation_url: `string`, URL to download the annotations (Parquet file).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.annotation_url = annotation_url
179
+
180
+
181
class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset builder: images plus per-object detection/tracking annotations."""

    BUILDER_CONFIGS = [
        EMTConfig(
            name="full_size",
            description="All images are in their original size.",
            # Must use /resolve/ (raw file), not /blob/ (HTML viewer page) —
            # otherwise the "archive" downloaded is a web page, not the tarball.
            data_url="https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz",
            annotation_url="https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations/",
        )
    ]

    def _info(self):
        """Return dataset metadata and the feature schema.

        Each example is an image paired with a variable-length sequence of
        annotated objects (bounding box, class id, track id, class name).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32")),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and per-split Parquet annotation files.

        Parquet files are plain files, not archives, so plain ``download``
        is used (``download_and_extract`` has nothing to extract here).
        """
        archive_path = dl_manager.download(self.config.data_url)
        annotation_paths = {
            "train": dl_manager.download(self.config.annotation_url + "train_annotations.parquet"),
            "test": dl_manager.download(self.config.annotation_url + "test_annotations.parquet"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # A fresh iterator per split: iter_archive yields
                    # (path, file_obj) pairs and can only be consumed once.
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, annotation_path):
        """Yield (index, example) pairs by joining archive images with annotations.

        Args:
            images: iterator of (path_in_archive, file_object) from ``iter_archive``.
            annotation_path: local path to this split's Parquet annotation file.
        """
        df = pd.read_parquet(annotation_path)

        # Index annotations by "<parent_dir>/<filename>" so each archive member
        # resolves with a single dict lookup. Only the last two path components
        # are kept because archive paths and annotation paths may differ in
        # their leading directories.
        annotation_dict = {}
        for _, row in df.iterrows():
            parts = row["file_path"].split("/")
            img_key = "/".join(parts[-2:])
            annotation_dict.setdefault(img_key, []).append(
                {
                    "bbox": row["bbox"],
                    "class_id": row["class_id"],
                    "track_id": row["track_id"],
                    "class_name": row["class_name"],
                }
            )

        # Images without annotations for this split are skipped; the running
        # index therefore only counts yielded examples.
        idx = 0
        for file_path, file_obj in images:
            if file_path in annotation_dict:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": annotation_dict[file_path],
                }
                idx += 1