Update wmms.py
Browse files
wmms.py
CHANGED
@@ -17,7 +17,7 @@ SAMPLE_RATE = 16_000
|
|
17 |
|
18 |
_COMPRESSED_FILENAME = 'watkins.zip'
|
19 |
|
20 |
-
CLASSES = ['Atlantic_Spotted_Dolphin', 'Bearded_Seal', 'Beluga,_White_Whale', 'Bottlenose_Dolphin', 'Bowhead_Whale', 'Clymene_Dolphin', 'Common_Dolphin', 'False_Killer_Whale', 'Fin,_Finback_Whale', 'Frasers_Dolphin', 'Grampus,_Rissos_Dolphin', 'Harp_Seal', 'Humpback_Whale', 'Killer_Whale', 'Leopard_Seal', 'Long-Finned_Pilot_Whale', 'Melon_Headed_Whale', 'Minke_Whale', 'Narwhal', 'Northern_Right_Whale', 'Pantropical_Spotted_Dolphin', 'Ross_Seal', 'Rough-Toothed_Dolphin', 'Short-Finned_Pacific_Pilot_Whale', 'Southern_Right_Whale', 'Sperm_Whale', 'Spinner_Dolphin', 'Striped_Dolphin', 'Walrus', 'White-beaked_Dolphin', 'White-sided_Dolphin']
|
21 |
|
22 |
|
23 |
class WmmsConfig(datasets.BuilderConfig):
|
@@ -59,33 +59,33 @@ class WMMS(datasets.GeneratorBasedBuilder):
|
|
59 |
"""Returns SplitGenerators."""
|
60 |
archive_path = dl_manager.extract(_COMPRESSED_FILENAME)
|
61 |
extensions = ['.wav']
|
62 |
-
_, filepaths = fast_scandir(archive_path, extensions, recursive=True)
|
63 |
-
labels = [default_find_classes(f) for f in filepaths]
|
64 |
|
65 |
-
#
|
66 |
-
|
67 |
-
|
|
|
68 |
|
69 |
# Step 1: Organize samples by class
|
70 |
class_to_files = defaultdict(list)
|
71 |
for filepath, label in zip(filepaths, labels):
|
72 |
class_to_files[label].append(filepath)
|
73 |
|
74 |
-
# Step 2: Select exactly 5 samples per class for the test set
|
|
|
75 |
test_files, test_labels = [], []
|
76 |
train_files, train_labels = [], []
|
77 |
|
78 |
for label, files in class_to_files.items():
|
79 |
-
if len(files) < 5:
|
80 |
-
raise ValueError(f"Not enough samples for class {label}") # Ensure each class has at least 5 samples
|
81 |
|
82 |
random.Random(914).shuffle(files) # Shuffle to ensure randomness
|
83 |
|
84 |
-
test_files.extend(files[:5]) # Pick first 5 for test
|
85 |
-
test_labels.extend([label] * 5)
|
86 |
|
87 |
-
train_files.extend(files[5:]) # Remaining go to train
|
88 |
-
train_labels.extend([label] * (len(files) - 5))
|
89 |
|
90 |
return [
|
91 |
datasets.SplitGenerator(
|
|
|
17 |
|
18 |
_COMPRESSED_FILENAME = 'watkins.zip'
|
19 |
|
20 |
+
CLASSES = ['Atlantic_Spotted_Dolphin', 'Bearded_Seal', 'Beluga,_White_Whale', 'Bottlenose_Dolphin', 'Bowhead_Whale', 'Clymene_Dolphin', 'Common_Dolphin', 'False_Killer_Whale', 'Fin,_Finback_Whale', 'Frasers_Dolphin', 'Grampus,_Rissos_Dolphin', 'Harp_Seal', 'Humpback_Whale', 'Killer_Whale', 'Leopard_Seal', 'Long-Finned_Pilot_Whale', 'Melon_Headed_Whale', 'Minke_Whale', 'Narwhal', 'Northern_Right_Whale', 'Pantropical_Spotted_Dolphin', 'Ross_Seal', 'Rough-Toothed_Dolphin', 'Short-Finned_Pacific_Pilot_Whale', 'Southern_Right_Whale', 'Sperm_Whale', 'Spinner_Dolphin', 'Striped_Dolphin', 'Walrus', 'White-beaked_Dolphin', 'White-sided_Dolphin']
|
21 |
|
22 |
|
23 |
class WmmsConfig(datasets.BuilderConfig):
|
|
|
59 |
"""Returns SplitGenerators."""
|
60 |
archive_path = dl_manager.extract(_COMPRESSED_FILENAME)
|
61 |
extensions = ['.wav']
|
|
|
|
|
62 |
|
63 |
+
_remove_class = 'Weddell_Seal' # only 2 samples in the dataset
|
64 |
+
_, _walker = fast_scandir(archive_path, extensions, recursive=True)
|
65 |
+
filepaths = [f for f in _walker if default_find_classes(f) != _remove_class]
|
66 |
+
labels = [default_find_classes(f) for f in filepaths]
|
67 |
|
68 |
# Step 1: Organize samples by class
|
69 |
class_to_files = defaultdict(list)
|
70 |
for filepath, label in zip(filepaths, labels):
|
71 |
class_to_files[label].append(filepath)
|
72 |
|
73 |
+
# Step 2: Select exactly n samples per class for the test set
|
74 |
+
n_shot = 5
|
75 |
test_files, test_labels = [], []
|
76 |
train_files, train_labels = [], []
|
77 |
|
78 |
for label, files in class_to_files.items():
|
79 |
+
if len(files) < n_shot:
|
80 |
+
raise ValueError(f"Not enough samples for class {label}") # Ensure each class has at least n_shot samples
|
81 |
|
82 |
random.Random(914).shuffle(files) # Shuffle to ensure randomness
|
83 |
|
84 |
+
test_files.extend(files[:n_shot]) # Pick first n_shot for test
|
85 |
+
test_labels.extend([label] * n_shot)
|
86 |
|
87 |
+
train_files.extend(files[n_shot:]) # Remaining go to train
|
88 |
+
train_labels.extend([label] * (len(files) - n_shot))
|
89 |
|
90 |
return [
|
91 |
datasets.SplitGenerator(
|