KuAvLab committed
Commit 81368b8 · verified · 1 Parent(s): 7132385

Update EMT.py

Files changed (1):
  1. EMT.py +43 -77
EMT.py CHANGED
@@ -3,7 +3,9 @@
 import os
 import datasets
 
+
 _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
+
 _LICENSE = "CC-BY-SA 4.0"
 
 _CITATION = """
@@ -23,31 +25,25 @@ A multi-task dataset for detection, tracking, prediction, and intention prediction.
 This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection + tracking.
 """
 
-# Image archive URL
-_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz"
+# Annotation repository
+_ANNOTATION_REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/labels"
 
-# Annotations URL (organized in train/test subfolders)
-_ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/annotations"
-# "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations"
+# Tar file URLs for images
+_TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/blob/main/train_images.tar.gz"
+_TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/blob/main/test_images.tar.gz"
 
-_GT_OBJECT_CLASSES = {
-    0: "Pedestrian",
-    1: "Cyclist",
-    2: "Motorbike",
-    3: "Small_motorised_vehicle",
-    4: "Car",
-    5: "Medium_vehicle",
-    6: "Large_vehicle",
-    7: "Bus",
-    8: "Emergency_vehicle",
-}
-
-# Update: Consider using a predefined set of object classes for easier filtering
-OBJECT_CLASSES = {v: k for k, v in _GT_OBJECT_CLASSES.items()}
 
 class EMT(datasets.GeneratorBasedBuilder):
     """EMT dataset."""
 
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="full_size",
+            description="All images are in their original size.",
+            version=datasets.Version("1.0.0"),
+        )
+    ]
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -56,7 +52,7 @@ class EMT(datasets.GeneratorBasedBuilder):
                     "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
-                            "bbox": datasets.Sequence(datasets.Value("float32")),
+                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
@@ -71,7 +67,12 @@ class EMT(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        archive_path = dl_manager.download(_IMAGE_ARCHIVE_URL)
+        """Download and extract train/test images and annotations."""
+        image_paths = {
+            "train": dl_manager.download_and_extract(_TRAIN_IMAGE_ARCHIVE_URL),
+            "test": dl_manager.download_and_extract(_TEST_IMAGE_ARCHIVE_URL),
+        }
+
         annotation_paths = {
             "train": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/train/"),
             "test": dl_manager.download_and_extract(f"{_ANNOTATION_REPO}/test/"),
@@ -81,49 +82,48 @@ class EMT(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(archive_path),
+                    "images": dl_manager.iter_archive(image_paths["train"]),
                     "annotation_path": annotation_paths["train"],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(archive_path),
+                    "images": dl_manager.iter_archive(image_paths["test"]),
                     "annotation_path": annotation_paths["test"],
                 },
             ),
-        ]
-    def _generate_examples(self, images, annotation_path):
-        """Generate examples from annotations and image archive."""
+        ]
 
-        # Dictionary to store annotations
+    def _generate_examples(self, images, annotation_path):
+        """Generate dataset examples by matching images to their corresponding annotations."""
         annotations = {}
 
-        # Process each image in the dataset
         for file_path, file_obj in images:
             img_name = os.path.basename(file_path)  # e.g., "000001.jpg"
             video_name = os.path.basename(os.path.dirname(file_path))  # e.g., "video_1112"
 
-            # Expected annotation file
             ann_file = os.path.join(annotation_path, f"{video_name}.txt")
 
-            # Read annotations only for the current video
             if os.path.exists(ann_file):
+                if ann_file not in annotations:
+                    annotations[ann_file] = {}
+
+                if img_name not in annotations[ann_file]:
+                    annotations[ann_file][img_name] = []
+
                 with open(ann_file, "r", encoding="utf-8") as f:
                     for line in f:
                         parts = line.strip().split()
-                        if len(parts) < 8:  # Ensure there are enough elements
+                        if len(parts) < 8:
                             continue
-
+
                         frame_id, track_id, class_name = parts[:3]
-                        bbox = list(map(float, parts[4:8]))  # Extract bounding box
-                        class_id = _GT_OBJECT_CLASSES.get(class_name, -1)  # Convert class_name to numeric ID
+                        bbox = list(map(float, parts[4:8]))
+                        class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
 
-                        # Match annotations to the correct image
                         if f"{frame_id}.jpg" == img_name:
-                            if img_name not in annotations:
-                                annotations[img_name] = []
-                            annotations[img_name].append(
+                            annotations[ann_file][img_name].append(
                                 {
                                     "bbox": bbox,
                                     "class_id": class_id,
@@ -136,46 +136,12 @@ class EMT(datasets.GeneratorBasedBuilder):
         idx = 0
         for file_path, file_obj in images:
             img_name = os.path.basename(file_path)
-            if img_name in annotations:
+            video_name = os.path.basename(os.path.dirname(file_path))
+            ann_file = os.path.join(annotation_path, f"{video_name}.txt")
+
+            if ann_file in annotations and img_name in annotations[ann_file]:
                 yield idx, {
                     "image": {"path": file_path, "bytes": file_obj.read()},
-                    "objects": annotations[img_name],
+                    "objects": annotations[ann_file][img_name],
                 }
                 idx += 1
-
-    # def _generate_examples(self, images, annotation_path):
-    #     """Generate examples from annotations and image archive."""
-
-    #     # Load annotation files
-    #     annotations = {}
-    #     for root, _, files in os.walk(annotation_path):
-    #         for file in files:
-    #             with open(os.path.join(root, file), "r", encoding="utf-8") as f:
-    #                 for line in f:
-    #                     parts = line.strip().split()
-    #                     frame_id, track_id, class_name = parts[:3]
-    #                     bbox = list(map(float, parts[4:8]))  # Extract bounding box
-    #                     class_id = _GT_OBJECT_CLASSES.get(class_name, -1)  # Convert class_name to numeric ID, default to -1 if not found
-
-    #                     img_path = f"{frame_id}.jpg"
-    #                     if img_path not in annotations:
-    #                         annotations[img_path] = []
-    #                     annotations[img_path].append(
-    #                         {
-    #                             "bbox": bbox,
-    #                             "class_id": class_id,
-    #                             "track_id": int(track_id),
-    #                             "class_name": class_name,
-    #                         }
-    #                     )
-
-    #     # Yield dataset entries
-    #     idx = 0
-    #     for file_path, file_obj in images:
-    #         img_name = os.path.basename(file_path)
-    #         if img_name in annotations:
-    #             yield idx, {
-    #                 "image": {"path": file_path, "bytes": file_obj.read()},
-    #                 "objects": annotations[img_name],
-    #             }
-    #             idx += 1
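
Note: the annotation files consumed by _generate_examples are plain text, one object per line, split on whitespace. A minimal sketch of that per-line contract as the loader reads it (the sample values here are hypothetical, the field at index 3 is skipped by the loader, and the bbox coordinate convention is not stated in this diff):

# Hypothetical line from a video_<id>.txt annotation file:
#   parts[0]   -> frame_id   (matched against "<frame_id>.jpg")
#   parts[1]   -> track_id   (cast to int by the loader)
#   parts[2]   -> class_name (string label)
#   parts[3]   -> not used by this loader
#   parts[4:8] -> bbox       (four floats)
line = "42 7 Car 0 604.9 580.8 776.3 693.6"
parts = line.strip().split()
frame_id, track_id, class_name = parts[:3]
bbox = list(map(float, parts[4:8]))
assert len(bbox) == 4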
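Note: a minimal usage sketch of the builder after this commit, assuming the script is served from the KuAvLab/EMT dataset repo and that the "full_size" config (the only BUILDER_CONFIG in this revision) is requested explicitly; this mirrors the diff, not documented behavior, and recent versions of the datasets library require trust_remote_code=True to run script-based loaders:

from datasets import load_dataset

# Builds the TRAIN split via _split_generators/_generate_examples above.
ds = load_dataset("KuAvLab/EMT", "full_size", split="train", trust_remote_code=True)

example = ds[0]
image = example["image"]      # decoded image from the train tar archive
objects = example["objects"]  # dict of per-object lists under Sequence({...})
boxes = objects["bbox"]       # each entry is a length-4 list of floats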