KuAvLab committed (verified)
Commit cdb0b4e · Parent(s): 5ef77e4

Update EMT.py

Files changed (1):
  1. EMT.py +183 -48
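
EMT.py is a script-based loader for the Hugging Face `datasets` library, so the commit is best read against how the script is consumed. A minimal usage sketch (assumptions: `full_size` is the only builder config declared in the file, and `trust_remote_code=True` is needed since recent `datasets` releases require it for dataset scripts):

    from datasets import load_dataset

    # "full_size" is the only BUILDER_CONFIG declared in EMT.py.
    ds = load_dataset("KuAvLab/EMT", "full_size", trust_remote_code=True)

    # Each example pairs a frame with its per-object annotations;
    # Sequence features come back as dicts of lists.
    sample = ds["train"][0]
    print(sample["objects"]["class_name"], sample["objects"]["bbox"])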
EMT.py CHANGED
@@ -1,37 +1,176 @@
+# import os
+# import datasets
+# import tarfile
+
+# _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
+# _LICENSE = "CC-BY-SA 4.0"
+# _CITATION = """
+# @article{EMTdataset2025,
+#       title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
+#       author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
+#       year={2025},
+#       eprint={2502.19260},
+#       archivePrefix={arXiv},
+#       primaryClass={cs.CV},
+#       url={https://arxiv.org/abs/2502.19260}
+# }
+# """
+
+# _DESCRIPTION = """\
+# A multi-task dataset for detection, tracking, prediction, and intention prediction.
+# This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
+# """
+
+# # # Annotation repository
+# # _ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
+
+# # Tar file URLs for images
+# _TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
+# _TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
+
+# # Tar file URLs for annotations
+# _TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
+# _TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
+
+
+# class EMT(datasets.GeneratorBasedBuilder):
+#     """EMT dataset."""
+
+#     BUILDER_CONFIGS = [
+#         datasets.BuilderConfig(
+#             name="full_size",
+#             description="All images are in their original size.",
+#             version=datasets.Version("1.0.0"),
+#         )
+#     ]
+
+#     def _info(self):
+#         return datasets.DatasetInfo(
+#             description=_DESCRIPTION,
+#             features=datasets.Features(
+#                 {
+#                     "image": datasets.Image(),
+#                     "objects": datasets.Sequence(
+#                         {
+#                             "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+#                             "class_id": datasets.Value("int32"),
+#                             "track_id": datasets.Value("int32"),
+#                             "class_name": datasets.Value("string"),
+#                         }
+#                     ),
+#                 }
+#             ),
+#             supervised_keys=None,
+#             homepage=_HOMEPAGE,
+#             license=_LICENSE,
+#             citation=_CITATION,
+#         )
+
+#     def _split_generators(self, dl_manager):
+#         """Download train/test images and annotations."""
+#         image_urls = {
+#             "train": _TRAIN_IMAGE_ARCHIVE_URL,
+#             "test": _TEST_IMAGE_ARCHIVE_URL,
+#         }
+
+#         # Download the tar file for annotations
+#         # annotation_urls = {
+#         #     "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
+#         #     "test": _TEST_ANNOTATION_ARCHIVE_URL,
+#         # }
+#         annotation_urls = {
+#             "train": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz",
+#             "test": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz",
+#         }
+#         # Download image files
+#         images = {
+#             "train": dl_manager.iter_archive(image_urls["train"]),
+#             "test": dl_manager.iter_archive(image_urls["test"]),
+#         }
+
+#         # Download annotation files and extract them
+#         annotations = {
+#             "train": dl_manager.download_and_extract(annotation_urls["train"]),
+#             "test": dl_manager.download_and_extract(annotation_urls["test"]),
+#         }
+
+#         return [
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TRAIN,
+#                 gen_kwargs={
+#                     "images": images["train"],
+#                     "annotation_path": annotations["train"],
+#                 },
+#             ),
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TEST,
+#                 gen_kwargs={
+#                     "images": images["test"],
+#                     "annotation_path": annotations["test"],
+#                 },
+#             ),
+#         ]
+
+#     def _generate_examples(self, images, annotation_path):
+#         """Generate dataset examples by matching images to their corresponding annotations."""
+
+#         annotations = {}
+
+#         # Load all annotations into memory
+#         for ann_file in os.listdir(annotation_path):
+#             video_name = os.path.splitext(ann_file)[0]  # Get video folder name from the annotation file
+#             ann_path = os.path.join(annotation_path, ann_file)
+#             print("ann_path:,",ann_path,"\nannotation_path: ",annotation_path)
+
+#             with open(ann_path, "r", encoding="utf-8") as f:
+#                 for line in f:
+#                     parts = line.strip().split()
+#                     if len(parts) < 8:
+#                         continue
+
+#                     frame_id, track_id, class_name = parts[:3]
+#                     bbox = list(map(float, parts[4:8]))
+#                     class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
+#                     img_name = f"{frame_id}.jpg"
+
+#                     # Store annotation in a dictionary
+#                     key = f"{video_name}/{img_name}"
+#                     if key not in annotations:
+#                         annotations[key] = []
+
+#                     annotations[key].append(
+#                         {
+#                             "bbox": bbox,
+#                             "class_id": class_id,
+#                             "track_id": int(track_id),
+#                             "class_name": class_name,
+#                         }
+#                     )
+
+#         # Yield dataset entries
+#         idx = 0
+#         for file_path, file_obj in images:
+#             img_name = os.path.basename(file_path)
+#             video_name = os.path.basename(os.path.dirname(file_path))  # Match the video folder
+#             key = f"{video_name}/{img_name}"
+
+#             if key in annotations:
+#                 yield idx, {
+#                     "image": {"path": file_path, "bytes": file_obj.read()},
+#                     "objects": annotations[key],
+#                 }
+#                 idx += 1
+
 import os
 import datasets
-import tarfile
-
-_HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
-_LICENSE = "CC-BY-SA 4.0"
-_CITATION = """
-@article{EMTdataset2025,
-      title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
-      author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
-      year={2025},
-      eprint={2502.19260},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV},
-      url={https://arxiv.org/abs/2502.19260}
-}
-"""
-
-_DESCRIPTION = """\
-A multi-task dataset for detection, tracking, prediction, and intention prediction.
-This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
-"""
-
-# # Annotation repository
-# _ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
+
+# Annotation repository
+_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
 
 # Tar file URLs for images
 _TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
 _TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
 
-# Tar file URLs for annotations
-_TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
-_TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
-
 
 class EMT(datasets.GeneratorBasedBuilder):
     """EMT dataset."""
@@ -72,28 +211,24 @@ class EMT(datasets.GeneratorBasedBuilder):
             "train": _TRAIN_IMAGE_ARCHIVE_URL,
             "test": _TEST_IMAGE_ARCHIVE_URL,
         }
-
-        # Download the tar file for annotations
-        # annotation_urls = {
-        #     "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
-        #     "test": _TEST_ANNOTATION_ARCHIVE_URL,
-        # }
-        annotation_urls = {
-            "train": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz",
-            "test": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz",
-        }
+
         # Download image files
         images = {
            "train": dl_manager.iter_archive(image_urls["train"]),
            "test": dl_manager.iter_archive(image_urls["test"]),
        }
-
-        # Download annotation files and extract them
+
+        # Download the annotation files from the remote repository
+        annotation_urls = {
+            "train": _ANNOTATION_REPO + "/train/",
+            "test": _ANNOTATION_REPO + "/test/",
+        }
+
        annotations = {
            "train": dl_manager.download_and_extract(annotation_urls["train"]),
            "test": dl_manager.download_and_extract(annotation_urls["test"]),
        }
-
+
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
@@ -116,28 +251,29 @@
 
         annotations = {}
 
-        # Load all annotations into memory
+        # Load all annotations into memory from the extracted remote tar file
         for ann_file in os.listdir(annotation_path):
-            video_name = os.path.splitext(ann_file)[0]  # Get video folder name from the annotation file
+            # Get video folder name (e.g., video_12211.txt)
+            video_name = os.path.splitext(ann_file)[0]
             ann_path = os.path.join(annotation_path, ann_file)
-            print("ann_path:,",ann_path,"\nannotation_path: ",annotation_path)
 
+            # Open the annotation file for reading
             with open(ann_path, "r", encoding="utf-8") as f:
                 for line in f:
                     parts = line.strip().split()
                     if len(parts) < 8:
                         continue
-
+
                     frame_id, track_id, class_name = parts[:3]
                     bbox = list(map(float, parts[4:8]))
                     class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
                     img_name = f"{frame_id}.jpg"
-
+
                     # Store annotation in a dictionary
                     key = f"{video_name}/{img_name}"
                     if key not in annotations:
                         annotations[key] = []
-
+
                     annotations[key].append(
                         {
                             "bbox": bbox,
@@ -153,11 +289,10 @@
             img_name = os.path.basename(file_path)
             video_name = os.path.basename(os.path.dirname(file_path))  # Match the video folder
             key = f"{video_name}/{img_name}"
-
+
             if key in annotations:
                 yield idx, {
                     "image": {"path": file_path, "bytes": file_obj.read()},
                     "objects": annotations[key],
                 }
                 idx += 1
-
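
For reference, `_generate_examples` expects one whitespace-delimited annotation file per video, with at least eight fields per line: indices 0-2 hold the frame id, track id, and class name, index 3 is skipped by the script, and indices 4-7 hold the box coordinates. A standalone sketch of that parsing step (the sample line and the `_GT_OBJECT_CLASSES` subset are illustrative assumptions; the real mapping is presumably defined elsewhere in EMT.py):

    # Hypothetical annotation line: frame, track, class, one field the
    # script skips, then four bbox values (inferred from parts[4:8]).
    line = "42 7 Car 0 100.0 150.0 220.0 310.0"

    _GT_OBJECT_CLASSES = {"Car": 0, "Pedestrian": 1}  # illustrative subset

    parts = line.strip().split()
    if len(parts) >= 8:  # lines with fewer fields are skipped
        frame_id, track_id, class_name = parts[:3]
        bbox = list(map(float, parts[4:8]))
        obj = {
            "bbox": bbox,
            "class_id": _GT_OBJECT_CLASSES.get(class_name, -1),  # -1 for unknown classes
            "track_id": int(track_id),
            "class_name": class_name,
        }
        # The script keys annotations as "<video_name>/<frame_id>.jpg"
        print(f"{frame_id}.jpg", obj)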