KuAvLab committed
Commit f768d4b · verified · Parent: 73c43b7

Update EMT.py

Files changed (1):
  1. EMT.py +35 -31
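
The diff below swaps the loader's per-file annotation downloads (paths built from _ANNOTATION_REPO) for two packaged archives, train_annotation.tar.gz and test_annotation.tar.gz, which are downloaded, extracted, and parsed with Python's tarfile module. A minimal usage sketch for the updated script follows; the trust_remote_code flag is an assumption about how current versions of the datasets library load community dataset scripts, not something stated in this commit.

import datasets

# Hypothetical quick check of the updated loader: the "train" and "test"
# splits come from the script's split generators shown in the diff below.
train = datasets.load_dataset("KuAvLab/EMT", split="train", trust_remote_code=True)
test = datasets.load_dataset("KuAvLab/EMT", split="test", trust_remote_code=True)

# Each example is assembled from the image archives plus the parsed
# bbox / class_id / track_id / class_name annotations.
print(len(train), len(test))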
EMT.py CHANGED
@@ -1,12 +1,9 @@
-"""EMT dataset."""
-
 import os
 import datasets
+import tarfile
 
 _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset"
-
 _LICENSE = "CC-BY-SA 4.0"
-
 _CITATION = """
 @article{EMTdataset2025,
   title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region},
@@ -31,6 +28,10 @@ _ANNOTATION_REPO = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/ann
 _TRAIN_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_images.tar.gz"
 _TEST_IMAGE_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_images.tar.gz"
 
+# Tar file URLs for annotations
+_TRAIN_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/train_annotation.tar.gz"
+_TEST_ANNOTATION_ARCHIVE_URL = "https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/test_annotation.tar.gz"
+
 
 class EMT(datasets.GeneratorBasedBuilder):
     """EMT dataset."""
@@ -72,10 +73,10 @@ class EMT(datasets.GeneratorBasedBuilder):
             "test": _TEST_IMAGE_ARCHIVE_URL,
         }
 
-        # Download the individual annotation files for train and test
+        # Download the tar file for annotations
         annotation_urls = {
-            "train": _ANNOTATION_REPO + "/train/",
-            "test": _ANNOTATION_REPO + "/test/",
+            "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
+            "test": _TEST_ANNOTATION_ARCHIVE_URL,
         }
 
         # Download image files
@@ -84,7 +85,7 @@ class EMT(datasets.GeneratorBasedBuilder):
             "test": dl_manager.iter_archive(image_urls["test"]),
         }
 
-        # Download annotation files
+        # Download annotation files and extract them
         annotations = {
             "train": dl_manager.download_and_extract(annotation_urls["train"]),
             "test": dl_manager.download_and_extract(annotation_urls["test"]),
@@ -112,35 +113,38 @@
 
         annotations = {}
 
-        # Load all annotations into memory
+        # Extract the tar.gz file and read the annotations
         for ann_file in os.listdir(annotation_path):
             video_name = os.path.splitext(ann_file)[0]  # Get video folder name
             ann_path = os.path.join(annotation_path, ann_file)
 
-            with open(ann_path, "r", encoding="utf-8") as f:
-                for line in f:
-                    parts = line.strip().split()
-                    if len(parts) < 8:
-                        continue
-
-                    frame_id, track_id, class_name = parts[:3]
-                    bbox = list(map(float, parts[4:8]))
-                    class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
-                    img_name = f"{frame_id}.jpg"
+            # Read annotation file
+            with tarfile.open(ann_path, "r:gz") as tar:
+                for member in tar.getmembers():
+                    with tar.extractfile(member) as file:
+                        for line in file:
+                            parts = line.strip().split()
+                            if len(parts) < 8:
+                                continue
+
+                            frame_id, track_id, class_name = parts[:3]
+                            bbox = list(map(float, parts[4:8]))
+                            class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
+                            img_name = f"{frame_id}.jpg"
 
-                    # Store annotation in a dictionary
-                    key = f"{video_name}/{img_name}"
-                    if key not in annotations:
-                        annotations[key] = []
+                            # Store annotation in a dictionary
+                            key = f"{video_name}/{img_name}"
+                            if key not in annotations:
+                                annotations[key] = []
 
-                    annotations[key].append(
-                        {
-                            "bbox": bbox,
-                            "class_id": class_id,
-                            "track_id": int(track_id),
-                            "class_name": class_name,
-                        }
-                    )
+                            annotations[key].append(
+                                {
+                                    "bbox": bbox,
+                                    "class_id": class_id,
+                                    "track_id": int(track_id),
+                                    "class_name": class_name,
+                                }
+                            )
 
         # Yield dataset entries
         idx = 0
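
For reference, the tar-based parsing added in the last hunk can be exercised on its own. The sketch below mirrors that loop for a single local archive, with two adjustments that are assumptions rather than part of the commit: the archive path, class map, and video name are placeholders, and each line is explicitly decoded to str because tarfile.extractfile() returns a binary stream.

import tarfile

# Placeholder stand-ins for the script's globals (illustration only).
_GT_OBJECT_CLASSES = {"car": 0, "pedestrian": 1}
ANNOTATION_ARCHIVE = "train_annotation.tar.gz"   # hypothetical local copy
video_name = "video_0001"                        # derived from the file name in the script

annotations = {}
with tarfile.open(ANNOTATION_ARCHIVE, "r:gz") as tar:
    for member in tar.getmembers():
        if not member.isfile():
            continue
        with tar.extractfile(member) as f:
            for raw_line in f:
                parts = raw_line.decode("utf-8").strip().split()
                if len(parts) < 8:
                    continue
                frame_id, track_id, class_name = parts[:3]
                bbox = list(map(float, parts[4:8]))
                class_id = _GT_OBJECT_CLASSES.get(class_name, -1)
                key = f"{video_name}/{frame_id}.jpg"
                annotations.setdefault(key, []).append(
                    {
                        "bbox": bbox,
                        "class_id": class_id,
                        "track_id": int(track_id),
                        "class_name": class_name,
                    }
                )

print(f"parsed {sum(len(v) for v in annotations.values())} boxes across {len(annotations)} frames")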