mikewang committed
Commit ac09b51 · 1 Parent(s): 0d800ab

First version of the Visual Attributes in the Wild (VAW) dataset.

Files changed (2)
  1. README.md +29 -3
  2. vaw.py +220 -0
README.md CHANGED
@@ -1,3 +1,29 @@
- ---
- license: other
- ---
+ # Dataset Card for Visual Attributes in the Wild (VAW)
+
+ ## Dataset Description
+
+ - **Homepage:** http://vawdataset.com/
+ - **Repository:** https://github.com/adobe-research/vaw_dataset
+   Note: The raw dataset files are downloaded from https://github.com/adobe-research/vaw_dataset/tree/main/data, which also hosts additional metadata files such as the attribute types. The train split of this Hugging Face dataset is a concatenation of train_part1.json and train_part2.json.
+ - **Paper Citation:**
+ ```
+ @InProceedings{Pham_2021_CVPR,
+     author    = {Pham, Khoi and Kafle, Kushal and Lin, Zhe and Ding, Zhihong and Cohen, Scott and Tran, Quan and Shrivastava, Abhinav},
+     title     = {Learning To Predict Visual Attributes in the Wild},
+     booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+     month     = {June},
+     year      = {2021},
+     pages     = {13018-13028}
+ }
+ ```
+ - **License:** https://github.com/adobe-research/vaw_dataset/blob/main/LICENSE.md
+
+ ### Dataset Summary
+ A large-scale visual attributes dataset with explicitly labeled positive and negative attributes.
+
+ - 620 unique attributes, covering color, shape, texture, posture, and many others
+ - 260,895 instances of different objects
+ - 2,260 unique objects observed in the wild
+ - 72,274 images from the Visual Genome dataset
+ - 4 evaluation metrics for measuring multi-faceted performance
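+
+ ### Loading the Dataset
+
+ A minimal loading sketch. The hub id below is an assumption for illustration; substitute this repository's actual id (or a local path to vaw.py).
+
+ ```python
+ from datasets import load_dataset
+
+ # "mikewang/vaw" is a hypothetical repo id; recent versions of `datasets`
+ # may also require trust_remote_code=True for script-based datasets.
+ dataset = load_dataset("mikewang/vaw")
+
+ # Each record is one object instance with its labeled attributes.
+ example = dataset["train"][0]
+ print(example["object_name"], example["positive_attributes"])
+ ```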
vaw.py ADDED
@@ -0,0 +1,220 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Visual Attributes in the Wild (VAW) dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{Pham_2021_CVPR,
+     author    = {Pham, Khoi and Kafle, Kushal and Lin, Zhe and Ding, Zhihong and Cohen, Scott and Tran, Quan and Shrivastava, Abhinav},
+     title     = {Learning To Predict Visual Attributes in the Wild},
+     booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+     month     = {June},
+     year      = {2021},
+     pages     = {13018-13028}
+ }
+ """
34
+
35
+ # TODO: Add description of the dataset here
36
+ # You can copy an official description
37
+ _DESCRIPTION = """\
38
+ Visual Attributes in the Wild (VAW) dataset: https://github.com/adobe-research/vaw_dataset#dataset-setup
39
+ Raw annotations and configs such as attrubte_types can be found at: https://github.com/adobe-research/vaw_dataset/tree/main/data
40
+ Note: The train split loaded from this hf dataset is a concatenation of the train_part1.json and train_part2.json.
41
+ """
42
+
43
+ # TODO: Add a link to an official homepage for the dataset here
44
+ _HOMEPAGE = "http://vawdataset.com/"
45
+
46
+ # TODO: Add the licence for the dataset here if you can find it
47
+ _LICENSE = "https://github.com/adobe-research/vaw_dataset/blob/main/LICENSE.md"
48
+
49
+ # TODO: Add link to the official dataset URLs here
50
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
51
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
52
+ # _URLS = {
53
+ # # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
54
+ # # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
55
+ # }
56
+
57
+ # _URL = "https://github.com/adobe-research/vaw_dataset/blob/main/data/"
58
+ _URL = "https://raw.githubusercontent.com/adobe-research/vaw_dataset/main/data/"
59
+ _URLS = {
60
+ "train": {
61
+ "part1": _URL + "train_part1.json",
62
+ "part2": _URL + "train_part2.json"
63
+ },
64
+ "val": _URL + "val.json",
65
+ "test": _URL + "test.json"
66
+ }
67
+
68
+
+
+ class VAW(datasets.GeneratorBasedBuilder):
+     """Builder for the Visual Attributes in the Wild (VAW) dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # Image ids correspond to the respective Visual Genome image ids.
+                 "image_id": datasets.Value("string"),
+                 # Unique instance id.
+                 "instance_id": datasets.Value("string"),
+                 # Bounding box coordinates for the instance: [x, y, width, height].
+                 "instance_bbox": datasets.features.Sequence(datasets.Value("float")),
+                 # Vertices of the segmentation polygon, as a list of rings of
+                 # [x, y] points, if a polygon exists; otherwise None.
+                 "instance_polygon": datasets.features.Sequence(
+                     datasets.features.Sequence(datasets.features.Sequence(datasets.Value("float")))
+                 ),
+                 # Name of the object for the instance.
+                 "object_name": datasets.Value("string"),
+                 # Explicitly labeled positive attributes for the instance.
+                 "positive_attributes": datasets.features.Sequence(datasets.Value("string")),
+                 # Explicitly labeled negative attributes for the instance.
+                 "negative_attributes": datasets.features.Sequence(datasets.Value("string")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This description appears on the dataset page.
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
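+
+     # For orientation, a hypothetical record (the values below are made up,
+     # not taken from the dataset) would look like:
+     #   {
+     #       "image_id": "2345678",
+     #       "instance_id": "2345678004",
+     #       "instance_bbox": [10.0, 20.0, 50.0, 80.0],
+     #       "instance_polygon": [[[12.0, 22.0], [58.0, 22.0], [58.0, 98.0], [12.0, 98.0]]],
+     #       "object_name": "dog",
+     #       "positive_attributes": ["brown", "furry"],
+     #       "negative_attributes": ["white"],
+     #   }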
+
+     def _split_generators(self, dl_manager):
+         # `download_and_extract` accepts the nested dict in _URLS and returns
+         # the same structure with each URL replaced by a local cached path.
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples.
+                 gen_kwargs={
+                     "filepath": downloaded_files["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": downloaded_files["val"],
+                     "split": "val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": downloaded_files["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         if split == "train":
+             # The train annotations ship as two files; concatenate part1 and part2.
+             with open(filepath["part1"], encoding="utf-8") as f:
+                 data = json.load(f)
+             with open(filepath["part2"], encoding="utf-8") as f:
+                 data += json.load(f)
+         else:
+             with open(filepath, encoding="utf-8") as f:
+                 data = json.load(f)
+
+         for key, row in enumerate(data):
+             yield key, {
+                 # Ids may be ints in the raw JSON; cast to str to match the declared features.
+                 "image_id": str(row["image_id"]),
+                 "instance_id": str(row["instance_id"]),
+                 "instance_bbox": row["instance_bbox"],
+                 "instance_polygon": row["instance_polygon"],
+                 "object_name": row["object_name"],
+                 "positive_attributes": row["positive_attributes"],
+                 "negative_attributes": row["negative_attributes"],
+             }
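+
+
+ # Minimal local sanity check (a sketch, not part of the loader): run from a
+ # clone of this repo with the `datasets` library installed.
+ #
+ #   from datasets import load_dataset
+ #   ds = load_dataset("./vaw.py", split="validation")
+ #   print(ds[0]["object_name"], ds[0]["positive_attributes"])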