KuAvLab committed (verified)
Commit 4da79e9 · 1 Parent(s): cb10dbd

Update EMT.py

Files changed (1)
  1. EMT.py +1 -163
EMT.py CHANGED
@@ -1,165 +1,3 @@
-# import os
-# import datasets
-# import tarfile
-
-# _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
-# _LICENSE = "CC-BY-SA 4.0"
-# _CITATION = """
-# @article{EMTdataset2025,
-#     title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
-#     author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji},
-#     year={2025},
-#     eprint={2502.19260},
-#     archivePrefix={arXiv},
-#     primaryClass={cs.CV},
-#     url={https://arxiv.org/abs/2502.19260}
-# }
-# """
-
-# _DESCRIPTION = """\
-# A multi-task dataset for detection, tracking, prediction, and intention prediction.
-# This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection and tracking.
-# """
-
-# # # Annotation repository
-# # _ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
-
-# # Tar file URLs for images
-# _TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
-# _TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
-
-# # Tar file URLs for annotations
-# _TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
-# _TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
-
-
-# class EMT(datasets.GeneratorBasedBuilder):
-#     """EMT dataset."""
-
-#     BUILDER_CONFIGS = [
-#         datasets.BuilderConfig(
-#             name="full_size",
-#             description="All images are in their original size.",
-#             version=datasets.Version("1.0.0"),
-#         )
-#     ]
-
-#     def _info(self):
-#         return datasets.DatasetInfo(
-#             description=_DESCRIPTION,
-#             features=datasets.Features(
-#                 {
-#                     "image": datasets.Image(),
-#                     "objects": datasets.Sequence(
-#                         {
-#                             "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-#                             "class_id": datasets.Value("int32"),
-#                             "track_id": datasets.Value("int32"),
-#                             "class_name": datasets.Value("string"),
-#                         }
-#                     ),
-#                 }
-#             ),
-#             supervised_keys=None,
-#             homepage=_HOMEPAGE,
-#             license=_LICENSE,
-#             citation=_CITATION,
-#         )
-
-#     def _split_generators(self, dl_manager):
-#         """Download train/test images and annotations."""
-#         image_urls = {
-#             "train": _TRAIN_IMAGE_ARCHIVE_URL,
-#             "test": _TEST_IMAGE_ARCHIVE_URL,
-#         }
-
-#         # Download the tar file for annotations
-#         # annotation_urls = {
-#         #     "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
-#         #     "test": _TEST_ANNOTATION_ARCHIVE_URL,
-#         # }
-#         annotation_urls = {
-#             "train": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz",
-#             "test": "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz",
-#         }
-#         # Download image files
-#         images = {
-#             "train": dl_manager.iter_archive(image_urls["train"]),
-#             "test": dl_manager.iter_archive(image_urls["test"]),
-#         }
-
-#         # Download annotation files and extract them
-#         annotations = {
-#             "train": dl_manager.download_and_extract(annotation_urls["train"]),
-#             "test": dl_manager.download_and_extract(annotation_urls["test"]),
-#         }
-
-#         return [
-#             datasets.SplitGenerator(
-#                 name=datasets.Split.TRAIN,
-#                 gen_kwargs={
-#                     "images": images["train"],
-#                     "annotation_path": annotations["train"],
-#                 },
-#             ),
-#             datasets.SplitGenerator(
-#                 name=datasets.Split.TEST,
-#                 gen_kwargs={
-#                     "images": images["test"],
-#                     "annotation_path": annotations["test"],
-#                 },
-#             ),
-#         ]
-
-#     def _generate_examples(self, images, annotation_path):
-#         """Generate dataset examples by matching images to their corresponding annotations."""
-
-#         annotations = {}
-
-#         # Load all annotations into memory
-#         for ann_file in os.listdir(annotation_path):
-#             video_name = os.path.splitext(ann_file)[0] # Get video folder name from the annotation file
-#             ann_path = os.path.join(annotation_path, ann_file)
-#             print("ann_path:,",ann_path,"\nannotation_path: ",annotation_path)
-
-#             with open(ann_path, "r", encoding="utf-8") as f:
-#                 for line in f:
-#                     parts = line.strip().split()
-#                     if len(parts) < 8:
-#                         continue
-
-#                     frame_id, track_id, class_name = parts[:3]
-#                     bbox = list(map(float, parts[4:8]))
-#                     class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
-#                     img_name = f"{frame_id}.jpg"
-
-#                     # Store annotation in a dictionary
-#                     key = f"{video_name}/{img_name}"
-#                     if key not in annotations:
-#                         annotations[key] = []
-
-#                     annotations[key].append(
-#                         {
-#                             "bbox": bbox,
-#                             "class_id": class_id,
-#                             "track_id": int(track_id),
-#                             "class_name": class_name,
-#                         }
-#                     )
-
-#         # Yield dataset entries
-#         idx = 0
-#         for file_path, file_obj in images:
-#             img_name = os.path.basename(file_path)
-#             video_name = os.path.basename(os.path.dirname(file_path)) # Match the video folder
-#             key = f"{video_name}/{img_name}"
-
-#             if key in annotations:
-#                 yield idx, {
-#                     "image": {"path": file_path, "bytes": file_obj.read()},
-#                     "objects": annotations[key],
-#                 }
-#                 idx += 1
 
 import os
 import datasets
@@ -275,7 +113,7 @@ class EMT(datasets.GeneratorBasedBuilder):
         else:
             raise ValueError(f"Unknown annotation path: {annotation_path}")
 
-        ann_dir = os.path.join(annotation_path, annotation_split)
+        ann_dir = annotation_path
 
        print(f"Extracted annotations path: {annotation_path}")
        print(f"Looking for annotations in: {ann_dir}")
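Besides deleting the dead commented-out loader code, the functional change is the one-liner in the second hunk: ann_dir = os.path.join(annotation_path, annotation_split) becomes ann_dir = annotation_path, so the directory returned by dl_manager.download_and_extract() is used as the annotation directory directly instead of having a split subfolder appended. A minimal sketch of what that relies on (run outside the loader, purely illustrative; it assumes the annotation files sit at the top level of the extracted archive, which is what the fix implies):

import os
import datasets

# Sketch only: download_and_extract() returns the local directory of the
# extracted annotation tarball (cached by the datasets library). The commit
# treats that directory itself as ann_dir, i.e. it assumes the annotation
# .txt files live directly inside it rather than under a split subfolder.
dl_manager = datasets.DownloadManager()
ann_dir = dl_manager.download_and_extract(
    "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
)
print(ann_dir)                      # extraction directory now used as ann_dir
print(sorted(os.listdir(ann_dir)))  # annotation files expected at this level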
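For context, a hedged sketch of consuming the dataset through this loader script. The "full_size" config name and the image/objects feature layout are taken from the builder shown (commented out) in this diff and may not match the current script exactly; trust_remote_code is needed on recent datasets releases for script-based datasets.

from datasets import load_dataset

# Sketch only: config name and feature fields are assumed from this diff.
ds = load_dataset("KuAvLab/EMT", "full_size", split="train", trust_remote_code=True)

example = ds[0]
print(example["image"].size)             # decoded PIL image
print(example["objects"]["class_name"])  # per-object labels; boxes under "bbox"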