# Caltech-101/Caltech-101.py — Hugging Face Hub dataset loading script.
# Copyright 2022 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech 101 loading script"""
from pathlib import Path
import datasets
import numpy as np
from datasets.tasks import ImageClassification
# BibTeX citation for the original Caltech-101 paper (Fei-Fei et al., 2004).
_CITATION = """\
@article{FeiFei2004LearningGV,
title={Learning Generative Visual Models from Few Training Examples: An Incremental Bayesian Approach Tested on 101 Object Categories},
author={Li Fei-Fei and Rob Fergus and Pietro Perona},
journal={Computer Vision and Pattern Recognition Workshop},
year={2004},
}
"""
# Human-readable dataset summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Pictures of objects belonging to 101 categories.
About 40 to 800 images per category.
Most categories have about 50 images.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto, and Marc'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
"""
_HOMEPAGE = "https://data.caltech.edu/records/20086"
_LICENSE = "CC BY 4.0"
# Relative path passed to dl_manager.download_and_extract — presumably resolved
# against the dataset repository on the Hub; the zip wraps an inner
# 101_ObjectCategories.tar.gz (see _split_generators). TODO confirm.
_DATA_URL = "caltech-101.zip"
# Lower-cased class-folder names used as the ClassLabel vocabulary.
# Includes "background_google" (the BACKGROUND_Google clutter folder), which
# _generate_examples skips for the "without_background_category" config.
_NAMES = [
"accordion",
"airplanes",
"anchor",
"ant",
"background_google",
"barrel",
"bass",
"beaver",
"binocular",
"bonsai",
"brain",
"brontosaurus",
"buddha",
"butterfly",
"camera",
"cannon",
"car_side",
"ceiling_fan",
"cellphone",
"chair",
"chandelier",
"cougar_body",
"cougar_face",
"crab",
"crayfish",
"crocodile",
"crocodile_head",
"cup",
"dalmatian",
"dollar_bill",
"dolphin",
"dragonfly",
"electric_guitar",
"elephant",
"emu",
"euphonium",
"ewer",
"faces",
"faces_easy",
"ferry",
"flamingo",
"flamingo_head",
"garfield",
"gerenuk",
"gramophone",
"grand_piano",
"hawksbill",
"headphone",
"hedgehog",
"helicopter",
"ibis",
"inline_skate",
"joshua_tree",
"kangaroo",
"ketch",
"lamp",
"laptop",
"leopards",
"llama",
"lobster",
"lotus",
"mandolin",
"mayfly",
"menorah",
"metronome",
"minaret",
"motorbikes",
"nautilus",
"octopus",
"okapi",
"pagoda",
"panda",
"pigeon",
"pizza",
"platypus",
"pyramid",
"revolver",
"rhino",
"rooster",
"saxophone",
"schooner",
"scissors",
"scorpion",
"sea_horse",
"snoopy",
"soccer_ball",
"stapler",
"starfish",
"stegosaurus",
"stop_sign",
"strawberry",
"sunflower",
"tick",
"trilobite",
"umbrella",
"watch",
"water_lilly",
"wheelchair",
"wild_cat",
"windsor_chair",
"wrench",
"yin_yang",
]
# Number of images sampled per class for the train split; the remainder of
# each class goes to the test split (mirrors the TF-datasets convention).
_TRAIN_POINTS_PER_CLASS = 30
class Caltech101(datasets.GeneratorBasedBuilder):
    """Caltech 101 dataset builder.

    Exposes two configurations:
    - ``with_background_category``: every image folder, including the
      ``BACKGROUND_Google`` clutter category (102 labels total).
    - ``without_background_category``: only the 101 object categories.
    """

    VERSION = datasets.Version("1.0.0")

    # NOTE(fix): the two descriptions below were swapped in the original
    # script (each described the other config); the "additonnal" typo is
    # fixed as well.
    _BUILDER_CONFIG_WITH_BACKGROUND = datasets.BuilderConfig(
        name="with_background_category",
        version=VERSION,
        description="Dataset containing the 101 categories and the additional background one.",
    )
    _BUILDER_CONFIG_WITHOUT_BACKGROUND = datasets.BuilderConfig(
        name="without_background_category",
        version=VERSION,
        description="Dataset containing only the 101 categories.",
    )
    BUILDER_CONFIGS = [
        _BUILDER_CONFIG_WITH_BACKGROUND,
        _BUILDER_CONFIG_WITHOUT_BACKGROUND,
    ]

    def _info(self):
        """Return the dataset metadata: features, supervised keys, citation,
        license and the image-classification task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=ImageClassification(
                image_column="image", label_column="label"
            ),
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data and declare the train and test splits.

        The top-level zip (``_DATA_URL``) contains an inner
        ``101_ObjectCategories.tar.gz`` holding the per-class image folders,
        hence the second extraction step.

        Raises:
            FileNotFoundError: if the inner archive is absent from the
                downloaded zip (clearer than the bare IndexError the original
                ``[...][0]`` produced).
        """
        data_root_dir = dl_manager.download_and_extract(_DATA_URL)
        inner_archives = [
            file
            for file in dl_manager.iter_files(data_root_dir)
            if Path(file).name == "101_ObjectCategories.tar.gz"
        ]
        if not inner_archives:
            raise FileNotFoundError(
                "101_ObjectCategories.tar.gz not found in the downloaded archive"
            )
        data_dir = dl_manager.extract(inner_archives[0])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                    "config_name": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "test",
                    "config_name": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, filepath, split, config_name):
        """Yield ``(key, example)`` pairs for the requested split.

        Same strategy as the one proposed in TF datasets: 30 random examples
        from each class are added to the train split, and the remainder are
        added to the test split.
        Source: https://github.com/tensorflow/datasets/blob/1106d587f97c4fca68c5b593dc7dc48c790ffa8c/tensorflow_datasets/image_classification/caltech.py#L88-L140
        """
        is_train_split = split == "train"
        data_dir = Path(filepath) / "101_ObjectCategories"
        # Fix the seed so the random partitioning of files is identical when
        # this generator is called for the train and the test split; the
        # global RNG state is saved and restored so callers are unaffected.
        numpy_original_state = np.random.get_state()
        np.random.seed(1234)
        for class_dir in data_dir.iterdir():
            fnames = [
                image_path
                for image_path in class_dir.iterdir()
                if image_path.name.endswith(".jpg")
            ]
            # _TRAIN_POINTS_PER_CLASS datapoints are sampled for the train
            # split, the others constitute the test split.
            if _TRAIN_POINTS_PER_CLASS > len(fnames):
                raise ValueError(
                    "Fewer than {} ({}) points in class {}".format(
                        _TRAIN_POINTS_PER_CLASS, len(fnames), class_dir.name
                    )
                )
            # NOTE(review): fnames follows iterdir()'s filesystem order, so
            # the sampled partition is only reproducible on a given machine;
            # sorting would fix that but would change existing splits — left
            # as-is.
            train_fnames = np.random.choice(
                fnames, _TRAIN_POINTS_PER_CLASS, replace=False
            )
            test_fnames = set(fnames).difference(train_fnames)
            fnames_to_emit = train_fnames if is_train_split else test_fnames
            # Skip the clutter class only AFTER the RNG has been consumed
            # above, so that both configs draw identical train/test
            # partitions for the 101 object categories. (Debug print removed.)
            if (
                class_dir.name == "BACKGROUND_Google"
                and config_name == self._BUILDER_CONFIG_WITHOUT_BACKGROUND.name
            ):
                continue
            for image_file in fnames_to_emit:
                record = {
                    "image": str(image_file),
                    "label": class_dir.name.lower(),
                }
                yield "%s/%s" % (class_dir.name.lower(), image_file), record
        # Resets the seeds to their previous states.
        np.random.set_state(numpy_original_state)