# coding=utf-8
# Copyright 2020 BigScience Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""P3 (Public Pool of Prompts)""" | |
import datasets | |
import json | |
import urllib | |
from collections import defaultdict | |
import tensorflow as tf | |
_CITATION = """\ | |
TODO""" | |
_DESCRIPTION = """\ | |
P3 (Public Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2). | |
Prompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource). | |
To train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. **The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.** | |
""" | |
_LICENSE = "Apache License 2.0" | |
_HOMEPAGE = "https://github.com/bigscience-workshop/promptsource" | |
_DATA_PATH = "data" | |
_HUB_PATH = "https://huggingface.co/datasets/bigscience/P3/raw/main" | |
logger = datasets.logging.get_logger(__name__) | |
def load_cached_task(features_dict, tfrecord):
    """Load a materialized split from a single TFRecord shard, parsing it with
    the feature spec given by `features_dict` (a mapping from feature name to
    a dict with `shape` and `dtype` keys)."""

    def _feature_config(shape, dtype):
        # int32 and bool are stored as int64 in the tf.train.Example protobuf.
        if dtype in ("int32", "bool"):
            dtype = "int64"
        # Use `FixedLenSequenceFeature` for sequences with variable length.
        if shape and shape[0] is None:
            return tf.io.FixedLenSequenceFeature(shape[1:], dtype, allow_missing=True)
        return tf.io.FixedLenFeature(shape, dtype)

    feature_description = {
        feat: _feature_config(**desc) for feat, desc in features_dict.items()
    }

    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord]))  # TODO -> handle multiple shards
    ds = ds.map(
        lambda pb: tf.io.parse_single_example(pb, feature_description),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    # Cast features back to the types from the info JSON since some features
    # must be cast for storage (e.g., int32 is stored as int64).
    ds = ds.map(
        lambda x: {k: tf.cast(v, features_dict[k]["dtype"]) for k, v in x.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return ds

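# For reference, the `features_dict` argument mirrors the "features" entry of
# each split's `info.<split>.json` file, e.g. (shapes illustrative):
#
#   {"inputs": {"shape": [None], "dtype": "int32"},
#    "targets": {"shape": [None], "dtype": "int32"}}
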
def read_from_url(url):
    # TODO: there might be a better way to handle these downloads (especially regarding caching).
    # TODO: ultimately, we should rely on the cache if internet is not available.
    try:
        content = urllib.request.urlopen(url, timeout=10.0)
        logger.info(f"Downloaded {url}")
    except urllib.error.URLError as e:
        raise ConnectionError(e)
    return content.read().decode("utf-8")

def find_task_splits_and_features_dict():
    """Get the tasks available (the list was pre-computed by
    `print_data_split_sizes.py`) and the features for each task."""
    task_splits_and_features = defaultdict(dict)
    data_split_sizes = read_from_url(f"{_HUB_PATH}/data_split_sizes.csv")
    data_split_sizes = [t.strip() for t in data_split_sizes.splitlines()]
    data_split_sizes = data_split_sizes[1:]  # Skip the header line.
    data_split_sizes = [t.split("|") for t in data_split_sizes]
    data_split_sizes = [(t[0], json.loads(t[1])) for t in data_split_sizes]

    for task_name, split_sizes in data_split_sizes:
        for split_name in split_sizes.keys():
            split_info = json.loads(
                read_from_url(f"{_HUB_PATH}/data/{task_name}/info.{split_name}.json")
            )
            features_dict = split_info["features"]
            assert split_info["num_shards"] == 1  # TODO -> handle multiple shards
            if not task_splits_and_features[task_name]:
                task_splits_and_features[task_name] = {
                    "splits": [],
                    "features_dict": features_dict,
                }
            task_splits_and_features[task_name]["splits"].append(split_name)
            assert features_dict == task_splits_and_features[task_name]["features_dict"]
    return task_splits_and_features

_TASK_SPLITS_AND_FEATURES_DICT = find_task_splits_and_features_dict()

_URLs = {
    task_name: {
        split_name: {
            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",  # TODO -> handle multiple shards
        }
        for split_name in splits_and_features_dict["splits"]
    }
    for task_name, splits_and_features_dict in _TASK_SPLITS_AND_FEATURES_DICT.items()
}
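
# For illustration, `_URLs` ends up nested as follows (task and split names
# are placeholders):
#
#   {"<task_name>": {"train": {"tfrecord": "data/<task_name>/train.tfrecord-00000-of-00001"}, ...}, ...}
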
class P3Config(datasets.BuilderConfig):
    """BuilderConfig for P3."""

    def __init__(self, splits, features_dict, score_eval, **kwargs):
        """BuilderConfig for P3.

        Args:
            splits: `List[str]`, the list of splits available for this task
            features_dict: `dict`, the dict of features for this task
            score_eval: `bool`, whether this task is formulated as a rank classification problem
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.1 initial commit
        super(P3Config, self).__init__(version=datasets.Version("0.1.0"), **kwargs)
        self.splits = splits
        self.features_dict = features_dict
        self.score_eval = score_eval

class P3(datasets.GeneratorBasedBuilder):
    """Subset of P3 used in `Multitask Prompted Training Enables Zero-Shot Task Generalization`"""

    BUILDER_CONFIGS = [
        P3Config(
            name=task_name,
            splits=splits_and_features_dict["splits"],
            features_dict=splits_and_features_dict["features_dict"],
            score_eval=task_name.endswith("score_eval"),
        )
        for task_name, splits_and_features_dict in _TASK_SPLITS_AND_FEATURES_DICT.items()
    ]
    def _info(self):
        # All features available are: 'inputs', 'inputs_pretokenized', 'targets',
        # 'targets_pretokenized', 'idx', 'is_correct', 'weight', and 'answer_choices'
        _FEAT_MAPPING = {
            "answer_choices": datasets.Sequence(datasets.Value("string")),
            "inputs": datasets.Sequence(datasets.Value("int32")),
            "inputs_pretokenized": datasets.Value("string"),
            "targets": datasets.Sequence(datasets.Value("int32")),
            "targets_pretokenized": datasets.Value("string"),
            "idx": datasets.Sequence(datasets.Value("int32")),
            "weight": datasets.Value("float32"),
            "is_correct": datasets.Value("bool"),
        }

        features = {}
        for feat_name in self.config.features_dict.keys():
            features[feat_name] = _FEAT_MAPPING[feat_name]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        split_generators = []
        data_dir = dl_manager.download_and_extract(_URLs)
        task_name = self.config.name

        if "train" in self.config.splits:
            split_name = "train"
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "tfrecord": data_dir[task_name][split_name]["tfrecord"],
                    },
                )
            )
        if "validation" in self.config.splits:
            split_name = "validation"
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "tfrecord": data_dir[task_name][split_name]["tfrecord"],
                    },
                )
            )
        if "test" in self.config.splits:
            split_name = "test"
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "tfrecord": data_dir[task_name][split_name]["tfrecord"],
                    },
                )
            )

        # Handle splits that are not train, validation or test.
        special_splits = set(self.config.splits) - set(["train", "validation", "test"])
        for special_split_name in special_splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split(special_split_name),
                    gen_kwargs={
                        "tfrecord": data_dir[task_name][special_split_name]["tfrecord"],
                    },
                )
            )

        return split_generators
    def _generate_examples(self, tfrecord):
        """This function returns the examples in the raw (text) form."""
        _FEAT_MAPPING_FUNCTIONS = {
            "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
            "inputs": lambda x: x.tolist(),
            "inputs_pretokenized": lambda x: x.decode("utf-8"),
            "targets": lambda x: x.tolist(),
            "targets_pretokenized": lambda x: x.decode("utf-8"),
            "idx": lambda x: x.tolist(),
            "weight": lambda x: float(x),
            "is_correct": lambda x: x,
        }

        features_dict = self.config.features_dict
        ds = load_cached_task(features_dict, tfrecord)
        for key, ex in enumerate(ds.as_numpy_iterator()):
            ex_dict = {
                feat_name: _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
                for feat_name, feat_value in ex.items()
            }
            yield key, ex_dict
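
# Example usage sketch (the config name below is a placeholder; any key of
# `_TASK_SPLITS_AND_FEATURES_DICT` is a valid config name):
#
#   from datasets import load_dataset
#   ds = load_dataset("bigscience/P3", "<task_name>")
#   print(ds["train"][0]["inputs_pretokenized"])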