KuAvLab committed on
Commit fcc30c7 · verified · 1 Parent(s): e41e310

Update EMT.py

Files changed (1):
  1. EMT.py +225 -54
EMT.py CHANGED
@@ -1,4 +1,212 @@
 
+# import os
+# import datasets
+# import tarfile
+
+# _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
+# _LICENSE = "CC-BY-SA 4.0"
+# _CITATION = """
+# @article{EMTdataset2025,
+# title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
+# author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
+# year={2025},
+# eprint={2502.19260},
+# archivePrefix={arXiv},
+# primaryClass={cs.CV},
+# url={https://arxiv.org/abs/2502.19260}
+# }
+# """
+
+# _DESCRIPTION = """\
+# A multi-task dataset for detection, tracking, prediction, and intention prediction.
+# This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
+# """
+
+# _TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
+# _TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
+
+# _TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
+# _TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
+
+
+# class EMT(datasets.GeneratorBasedBuilder):
+#     """EMT dataset."""
+
+#     BUILDER_CONFIGS = [
+#         datasets.BuilderConfig(
+#             name="full_size",
+#             description="All images are in their original size.",
+#             version=datasets.Version("1.0.0"),
+#         )
+#     ]
+
+#     def _info(self):
+#         return datasets.DatasetInfo(
+#             description=_DESCRIPTION,
+#             features=datasets.Features(
+#                 {
+#                     "image": datasets.Image(),
+#                     "objects": datasets.Sequence(
+#                         {
+#                             "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+#                             "class_id": datasets.Value("int32"),
+#                             "track_id": datasets.Value("int32"),
+#                             "class_name": datasets.Value("string"),
+#                         }
+#                     ),
+#                 }
+#             ),
+#             supervised_keys=None,
+#             homepage=_HOMEPAGE,
+#             license=_LICENSE,
+#             citation=_CITATION,
+#         )
+
+#     def _split_generators(self, dl_manager):
+#         """Download (if not cached) and prepare dataset splits."""
+
+#         image_urls = {
+#             "train": _TRAIN_IMAGE_ARCHIVE_URL,
+#             "test": _TEST_IMAGE_ARCHIVE_URL,
+#         }
+
+#         annotation_urls = {
+#             "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
+#             "test": _TEST_ANNOTATION_ARCHIVE_URL,
+#         }
+
+#         # Based on the requested split, we only download the relevant data
+#         split = self.config.name  # Determine the requested split (train or test)
+
+#         # Ensure paths are correctly resolved for the requested split
+#         extracted_paths = dl_manager.download_and_extract({split: annotation_urls[split]})
+#         image_archives = dl_manager.download_and_extract({split: image_urls[split]})
+
+#         # Ensure annotation paths point to the correct subdirectory
+#         annotation_path = os.path.join(extracted_paths[split], "annotations", split)
+#         image_path = image_archives[split]
+
+#         return [
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
+#                 gen_kwargs={
+#                     "images": dl_manager.iter_archive(image_path),
+#                     "annotation_path": annotation_path,
+#                 },
+#             ),
+#         ]
+
+
+#     # def _split_generators(self, dl_manager):
+#     #     """Download (if not cached) and prepare dataset splits."""
+
+#     #     image_urls = {
+#     #         "train": _TRAIN_IMAGE_ARCHIVE_URL,
+#     #         "test": _TEST_IMAGE_ARCHIVE_URL,
+#     #     }
+
+#     #     annotation_urls = {
+#     #         "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
+#     #         "test": _TEST_ANNOTATION_ARCHIVE_URL,
+#     #     }
+
+#     #     # Ensure paths are correctly resolved
+#     #     extracted_paths = dl_manager.download_and_extract(annotation_urls)
+#     #     image_archives = dl_manager.download_and_extract(image_urls)
+
+#     #     # ✅ Ensure annotation paths point to the correct subdirectory
+#     #     train_annotation_path = os.path.join(extracted_paths["train"], "annotations", "train")
+#     #     test_annotation_path = os.path.join(extracted_paths["test"], "annotations", "test")
+
+#     #     return [
+#     #         datasets.SplitGenerator(
+#     #             name=datasets.Split.TRAIN,
+#     #             gen_kwargs={
+#     #                 "images": dl_manager.iter_archive(image_archives["train"]),
+#     #                 "annotation_path": train_annotation_path,  # ✅ Corrected path
+#     #             },
+#     #         ),
+#     #         datasets.SplitGenerator(
+#     #             name=datasets.Split.TEST,
+#     #             gen_kwargs={
+#     #                 "images": dl_manager.iter_archive(image_archives["test"]),
+#     #                 "annotation_path": test_annotation_path,  # ✅ Corrected path
+#     #             },
+#     #         ),
+#     #     ]
+
+#     def _generate_examples(self, images, annotation_path):
+#         """Generate dataset examples by matching images to their corresponding annotations."""
+
+#         annotations = {}
+
+#         # Determine whether we're processing train or test split
+#         if "train" in annotation_path:
+#             annotation_split = "train"
+#         elif "test" in annotation_path:
+#             annotation_split = "test"
+#         else:
+#             raise ValueError(f"Unknown annotation path: {annotation_path}")
+
+#         ann_dir = annotation_path
+
+#         print(f"Extracted annotations path: {annotation_path}")
+#         print(f"Looking for annotations in: {ann_dir}")
+
+#         # Check if annotation directory exists
+#         if not os.path.exists(ann_dir):
+#             raise FileNotFoundError(f"Annotation directory does not exist: {ann_dir}")
+
+#         # Extract annotation files and read their contents
+#         for ann_file in os.listdir(ann_dir):
+#             video_name = os.path.splitext(ann_file)[0]  # Extract video folder name from file
+#             ann_path = os.path.join(ann_dir, ann_file)
+
+#             if os.path.isdir(ann_path):
+#                 continue  # Skip directories
+
+#             print("Processing annotation file:", ann_path)
+
+#             with open(ann_path, "r", encoding="utf-8") as f:
+#                 for line in f:
+#                     parts = line.strip().split()
+#                     if len(parts) < 8:
+#                         continue
+
+#                     frame_id, track_id, class_name = parts[:3]
+#                     bbox = list(map(float, parts[4:8]))
+#                     class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
+#                     img_name = f"{frame_id}.jpg"
+
+#                     # Store annotation in a dictionary
+#                     key = f"{video_name}/{img_name}"
+#                     if key not in annotations:
+#                         annotations[key] = []
+
+#                     annotations[key].append(
+#                         {
+#                             "bbox": bbox,
+#                             "class_id": class_id,
+#                             "track_id": int(track_id),
+#                             "class_name": class_name,
+#                         }
+#                     )
+
+#         # Yield dataset entries
+#         idx = 0
+#         for file_path, file_obj in images:
+#             img_name = os.path.basename(file_path)
+#             video_name = os.path.basename(os.path.dirname(file_path))  # Match the video folder
+#             key = f"{video_name}/{img_name}"
+
+#             if key in annotations:
+#                 yield idx, {
+#                     "image": {"path": file_path, "bytes": file_obj.read()},
+#                     "objects": annotations[key],
+#                 }
+#                 idx += 1
+
+
 import os
 import datasets
 import tarfile
@@ -34,8 +242,8 @@ class EMT(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name="full_size",
-            description="All images are in their original size.",
+            name="default",
+            description="Dataset with train and test splits",
             version=datasets.Version("1.0.0"),
         )
     ]
@@ -75,66 +283,31 @@ class EMT(datasets.GeneratorBasedBuilder):
             "test": _TEST_ANNOTATION_ARCHIVE_URL,
         }
 
-        # Based on the requested split, we only download the relevant data
-        split = self.config.name  # Determine the requested split (train or test)
-
         # Ensure paths are correctly resolved for the requested split
-        extracted_paths = dl_manager.download_and_extract({split: annotation_urls[split]})
-        image_archives = dl_manager.download_and_extract({split: image_urls[split]})
-
+        extracted_paths = dl_manager.download_and_extract(annotation_urls)
+        image_archives = dl_manager.download_and_extract(image_urls)
+
         # Ensure annotation paths point to the correct subdirectory
-        annotation_path = os.path.join(extracted_paths[split], "annotations", split)
-        image_path = image_archives[split]
+        train_annotation_path = os.path.join(extracted_paths["train"], "annotations", "train")
+        test_annotation_path = os.path.join(extracted_paths["test"], "annotations", "test")
 
         return [
            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "images": dl_manager.iter_archive(image_archives["train"]),
+                    "annotation_path": train_annotation_path,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(image_path),
-                    "annotation_path": annotation_path,
+                    "images": dl_manager.iter_archive(image_archives["test"]),
+                    "annotation_path": test_annotation_path,
                 },
             ),
         ]
 
-
-    # def _split_generators(self, dl_manager):
-    #     """Download (if not cached) and prepare dataset splits."""
-
-    #     image_urls = {
-    #         "train": _TRAIN_IMAGE_ARCHIVE_URL,
-    #         "test": _TEST_IMAGE_ARCHIVE_URL,
-    #     }
-
-    #     annotation_urls = {
-    #         "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
-    #         "test": _TEST_ANNOTATION_ARCHIVE_URL,
-    #     }
-
-    #     # Ensure paths are correctly resolved
-    #     extracted_paths = dl_manager.download_and_extract(annotation_urls)
-    #     image_archives = dl_manager.download_and_extract(image_urls)
-
-    #     # ✅ Ensure annotation paths point to the correct subdirectory
-    #     train_annotation_path = os.path.join(extracted_paths["train"], "annotations", "train")
-    #     test_annotation_path = os.path.join(extracted_paths["test"], "annotations", "test")
-
-    #     return [
-    #         datasets.SplitGenerator(
-    #             name=datasets.Split.TRAIN,
-    #             gen_kwargs={
-    #                 "images": dl_manager.iter_archive(image_archives["train"]),
-    #                 "annotation_path": train_annotation_path,  # ✅ Corrected path
-    #             },
-    #         ),
-    #         datasets.SplitGenerator(
-    #             name=datasets.Split.TEST,
-    #             gen_kwargs={
-    #                 "images": dl_manager.iter_archive(image_archives["test"]),
-    #                 "annotation_path": test_annotation_path,  # ✅ Corrected path
-    #             },
-    #         ),
-    #     ]
-
     def _generate_examples(self, images, annotation_path):
         """Generate dataset examples by matching images to their corresponding annotations."""
 
@@ -205,5 +378,3 @@ class EMT(datasets.GeneratorBasedBuilder):
                     "objects": annotations[key],
                 }
                 idx += 1
-
-
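
With this version of the script, the builder exposes a single "default" config whose split generators download both archives and yield TRAIN and TEST splits, each example pairing an image with per-object bbox, class_id, track_id, and class_name fields. The snippet below is a minimal usage sketch, not part of EMT.py: the repository id "KuAvLab/EMT" is inferred from the archive URLs in the script, and the trust_remote_code flag is an assumption about how script-backed datasets are loaded in recent versions of the datasets library.

# Usage sketch (not part of the commit): load both splits via the updated builder.
from datasets import load_dataset

ds = load_dataset("KuAvLab/EMT", trust_remote_code=True)  # single "default" config
train, test = ds["train"], ds["test"]

# "objects" is a Sequence of per-object fields, so it is returned as a dict of lists.
example = train[0]
for bbox, name in zip(example["objects"]["bbox"], example["objects"]["class_name"]):
    print(name, bbox)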