Upload ImageRewardDB.py with huggingface_hub
ImageRewardDB.py CHANGED (+17 -18)
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+
 """TODO: Add a description here."""


@@ -23,31 +23,33 @@ import datasets
 from huggingface_hub import hf_hub_url


-# TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@
-title
-author={
-},
-
+@misc{xu2023imagereward,
+      title={ImageReward: Learning and Evaluating Human Preferences for Text-to-Image Generation},
+      author={Jiazheng Xu and Xiao Liu and Yuchen Wu and Yuxuan Tong and Qinkai Li and Ming Ding and Jie Tang and Yuxiao Dong},
+      year={2023},
+      eprint={2304.05977},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
 }
 """

-# TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-
+We systematically identify the challenges for text-to-image human preference annotation, and \
+consequently design a pipeline tailored for it, establishing criteria for quantitative assessment and \
+annotator training, optimizing labeling experience, and ensuring quality validation. We build this \
+text-to-image comparison dataset, ImageRewardDB, for training the ImageReward model based on the pipeline. \
+The ImageRewardDB covers both the rating and ranking components, collecting a dataset of 137k expert \
+comparisons to date.
 """

-# TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = "https://huggingface.co/datasets/wuyuchen/ImageRewardDB"
 _VERSION = datasets.Version("1.0.0")

-
-_LICENSE = ""
+_LICENSE = "apache-2.0"

-# TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _REPO_ID = "wuyuchen/ImageRewardDB"
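The script imports hf_hub_url (context line 23) and defines _REPO_ID, which suggests it resolves files hosted in the Hub dataset repo at download time rather than bundling them. A minimal sketch of that pattern; the filename is a placeholder, since the repo layout is not visible in this diff:

from huggingface_hub import hf_hub_url

_REPO_ID = "wuyuchen/ImageRewardDB"

# Build a direct download URL for one file in the dataset repo.
# "metadata.json" is a hypothetical name, not taken from the diff.
url = hf_hub_url(repo_id=_REPO_ID, filename="metadata.json", repo_type="dataset")
# -> https://huggingface.co/datasets/wuyuchen/ImageRewardDB/resolve/main/metadata.json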
@@ -84,9 +86,8 @@ class ImageRewardDBConfig(datasets.BuilderConfig):
         super(ImageRewardDBConfig, self).__init__(version=_VERSION, **kwargs)
         self.part_ids = part_ids

-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class ImageRewardDB(datasets.GeneratorBasedBuilder):
-    """
+    """A dataset of 137k expert comparisons to date, demonstrating the text-to-image human preference."""

     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -99,6 +100,7 @@ class ImageRewardDB(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
+
     BUILDER_CONFIGS = []

     for num_k in [1,2,4,8]:
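The loop over num_k in [1,2,4,8], together with DEFAULT_CONFIG_NAME = "8k" in the next hunk, implies one configuration per dataset size, presumably named "1k", "2k", "4k", and "8k". Assuming that naming, loading a specific configuration would look like this (the split name "train" is also an assumption):

from datasets import load_dataset

# "8k" is the declared default configuration; the other names are inferred from the loop.
dataset = load_dataset("wuyuchen/ImageRewardDB", "8k", split="train")
print(dataset)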
@@ -114,7 +116,6 @@
     DEFAULT_CONFIG_NAME = "8k" # It's not mandatory to have a default configuration. Just use one if it make sense.

     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         features = datasets.Features(
             {
                 "image": datasets.Image(),
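Only the first feature ("image") is visible in this hunk. In the standard GeneratorBasedBuilder template this script is built from, _info returns a datasets.DatasetInfo that wires the features to the module-level metadata; a sketch of that conventional shape, not the file's exact contents:

def _info(self):
    # Remaining columns are truncated in this hunk; only "image" is shown in the diff.
    features = datasets.Features({"image": datasets.Image()})
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        homepage=_HOMEPAGE,
        license=_LICENSE,
        citation=_CITATION,
    )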
@@ -145,7 +146,6 @@ class ImageRewardDB(datasets.GeneratorBasedBuilder):
         )

     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
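The _generate_examples signature in the final hunk (split, data_dirs, json_paths, metadata_path) fixes the gen_kwargs that _split_generators must supply. A hedged sketch of the expected return shape; the actual download logic, file names, and available splits are not shown in this diff:

def _split_generators(self, dl_manager):
    # Hypothetical: download whatever files the selected config needs and pass
    # their local paths to _generate_examples via gen_kwargs.
    metadata_path = dl_manager.download(
        hf_hub_url(_REPO_ID, "metadata.json", repo_type="dataset")  # placeholder filename
    )
    data_dirs, json_paths = [], []  # filled per part_id in the real script
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "split": "train",
                "data_dirs": data_dirs,
                "json_paths": json_paths,
                "metadata_path": metadata_path,
            },
        ),
    ]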
@@ -195,7 +195,6 @@ class ImageRewardDB(datasets.GeneratorBasedBuilder):

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, split, data_dirs, json_paths, metadata_path):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

         num_data_dirs = len(data_dirs)
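Per the template comment retained above, _generate_examples must yield (key, example) pairs with keys that are unique across the split. A minimal sketch of that contract using only the "image" column visible in this diff; the JSON field name and per-part file layout are assumptions, and `import json` is assumed at module level:

def _generate_examples(self, split, data_dirs, json_paths, metadata_path):
    num_data_dirs = len(data_dirs)  # mirrors the context line shown in the hunk
    key = 0
    for json_path in json_paths:
        with open(json_path, encoding="utf-8") as f:
            for item in json.load(f):
                # "image_path" is a hypothetical field name.
                yield key, {"image": item["image_path"]}
                key += 1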