Update EMT.py
EMT.py CHANGED

@@ -281,9 +281,50 @@ class EMT(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        """Download (if not cached) and prepare dataset splits."""
+    # def _split_generators(self, dl_manager):
+    #     """Download (if not cached) and prepare dataset splits."""
+
+    #     image_urls = {
+    #         "train": _TRAIN_IMAGE_ARCHIVE_URL,
+    #         "test": _TEST_IMAGE_ARCHIVE_URL,
+    #     }
+
+    #     annotation_urls = {
+    #         "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
+    #         "test": _TEST_ANNOTATION_ARCHIVE_URL,
+    #     }
+
+    #     # Ensure paths are correctly resolved for the requested split
+    #     extracted_paths = dl_manager.download_and_extract(annotation_urls)
+    #     image_archives = dl_manager.download_and_extract(image_urls)
+
+    #     # Ensure annotation paths point to the correct subdirectory
+    #     train_annotation_path = os.path.join(extracted_paths["train"], "EMT", "annotations", "train")
+    #     test_annotation_path = os.path.join(extracted_paths["test"], "EMT", "annotations", "test")
+
+    #     return [
+    #         datasets.SplitGenerator(
+    #             name=datasets.Split.TRAIN,
+    #             gen_kwargs={
+    #                 "images": dl_manager.iter_archive(image_archives["train"]),
+    #                 "annotation_path": train_annotation_path,
+    #             },
+    #         ),
+    #         datasets.SplitGenerator(
+    #             name=datasets.Split.TEST,
+    #             gen_kwargs={
+    #                 "images": dl_manager.iter_archive(image_archives["test"]),
+    #                 "annotation_path": test_annotation_path,
+    #             },
+    #         ),
+    #     ]
+    def _split_generators(self, dl_manager):
+        """Download (if not cached) and prepare only the requested dataset split."""
+
+        # Get the requested split
+        requested_split = self.config.name
+
         image_urls = {
             "train": _TRAIN_IMAGE_ARCHIVE_URL,
             "test": _TEST_IMAGE_ARCHIVE_URL,
@@ -293,32 +334,29 @@ class EMT(datasets.GeneratorBasedBuilder):
             "train": _TRAIN_ANNOTATION_ARCHIVE_URL,
             "test": _TEST_ANNOTATION_ARCHIVE_URL,
         }
-
-        # Ensure paths are correctly resolved for the requested split
-        extracted_paths = dl_manager.download_and_extract(annotation_urls)
-        image_archives = dl_manager.download_and_extract(image_urls)
-
-        # Ensure annotation paths point to the correct subdirectory
-        train_annotation_path = os.path.join(extracted_paths["train"], "EMT", "annotations", "train")
-        test_annotation_path = os.path.join(extracted_paths["test"], "EMT", "annotations", "test")
-
+
+        # Validate split name
+        if requested_split not in image_urls:
+            raise ValueError(f"Invalid split '{requested_split}'. Available splits: 'train', 'test'.")
+
+        # Extract only the requested split
+        extracted_images = dl_manager.download_and_extract({requested_split: image_urls[requested_split]})
+        extracted_annotations = dl_manager.download_and_extract({requested_split: annotation_urls[requested_split]})
+
+        # Define paths based on the requested split
+        annotation_path = os.path.join(extracted_annotations[requested_split], "annotations", requested_split)
+        image_path = extracted_images[requested_split]
+
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
+                name=datasets.Split.TRAIN if requested_split == "train" else datasets.Split.TEST,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(image_archives["train"]),
-                    "annotation_path": train_annotation_path,
+                    "image_dir": image_path,
+                    "annotation_path": annotation_path,
                 },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "images": dl_manager.iter_archive(image_archives["test"]),
-                    "annotation_path": test_annotation_path,
-                },
-            ),
+            )
         ]
+
 
     def _generate_examples(self, images, annotation_path):
         """Generate dataset examples by matching images to their corresponding annotations."""