---
annotations_creators:
- other
language_creators:
- other
multilinguality:
- monolingual
source_datasets:
- original
paperswithcode_id: superglue
arxiv: 1905.00537
pretty_name: SuperGLUE Benchmark Datasets
tags:
- superglue
- nlp
- benchmark
license: mit
language:
- en
dataset_info:
- config_name: boolq
features:
- name: question
dtype: string
- name: passage
dtype: string
- name: idx
dtype: int64
- name: label
dtype: bool
splits:
- name: train
num_bytes: 6136774
num_examples: 9427
- name: validation
num_bytes: 2103781
num_examples: 3270
- name: test
num_bytes: 2093385
num_examples: 3245
download_size: 6439045
dataset_size: 10333940
- config_name: cb
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype: string
- name: idx
dtype: int64
splits:
- name: train
num_bytes: 89859
num_examples: 250
- name: validation
num_bytes: 22480
num_examples: 56
- name: test
num_bytes: 93492
num_examples: 250
download_size: 137099
dataset_size: 205831
- config_name: copa
features:
- name: premise
dtype: string
- name: choice1
dtype: string
- name: choice2
dtype: string
- name: question
dtype: string
- name: label
dtype: int64
- name: idx
dtype: int64
splits:
- name: train
num_bytes: 50833
num_examples: 400
- name: validation
num_bytes: 12879
num_examples: 100
- name: test
num_bytes: 61846
num_examples: 500
download_size: 84158
dataset_size: 125558
- config_name: multirc
features:
- name: idx
dtype: int64
- name: version
dtype: float64
- name: passage
struct:
- name: questions
list:
- name: answers
list:
- name: idx
dtype: int64
- name: label
dtype: int64
- name: text
dtype: string
- name: idx
dtype: int64
- name: question
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 2393721
num_examples: 456
- name: validation
num_bytes: 429255
num_examples: 83
- name: test
num_bytes: 858870
num_examples: 166
download_size: 2053244
dataset_size: 3681846
- config_name: record
features:
- name: source
dtype: string
- name: passage
struct:
- name: entities
list:
- name: end
dtype: int64
- name: start
dtype: int64
- name: text
dtype: string
- name: qas
list:
- name: answers
list:
- name: end
dtype: int64
- name: start
dtype: int64
- name: text
dtype: string
- name: idx
dtype: int64
- name: query
dtype: string
- name: idx
dtype: int64
splits:
- name: train
num_bytes: 110591940
num_examples: 65709
- name: validation
num_bytes: 12375907
num_examples: 7481
- name: test
num_bytes: 11509574
num_examples: 7484
download_size: 71256085
dataset_size: 134477421
- config_name: rte
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype: string
- name: idx
dtype: int64
splits:
- name: train
num_bytes: 877041
num_examples: 2490
- name: validation
num_bytes: 94010
num_examples: 277
- name: test
num_bytes: 973916
num_examples: 3000
download_size: 1269005
dataset_size: 1944967
- config_name: wic
features:
- name: word
dtype: string
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: idx
dtype: int64
- name: label
dtype: bool
- name: start1
dtype: int64
- name: start2
dtype: int64
- name: end1
dtype: int64
- name: end2
dtype: int64
- name: version
dtype: float64
splits:
- name: train
num_bytes: 767620
num_examples: 5428
- name: validation
num_bytes: 94651
num_examples: 638
- name: test
num_bytes: 207006
num_examples: 1400
download_size: 591526
dataset_size: 1069277
- config_name: wsc
features:
- name: text
dtype: string
- name: target
struct:
- name: span1_index
dtype: int64
- name: span1_text
dtype: string
- name: span2_index
dtype: int64
- name: span2_text
dtype: string
- name: idx
dtype: int64
- name: label
dtype: bool
splits:
- name: train
num_bytes: 91597
num_examples: 554
- name: validation
num_bytes: 21950
num_examples: 104
- name: test
num_bytes: 32011
num_examples: 146
download_size: 47100
dataset_size: 145558
configs:
- config_name: boolq
data_files:
- split: train
path: boolq/train-*
- split: validation
path: boolq/validation-*
- split: test
path: boolq/test-*
- config_name: cb
data_files:
- split: train
path: cb/train-*
- split: validation
path: cb/validation-*
- split: test
path: cb/test-*
- config_name: copa
data_files:
- split: train
path: copa/train-*
- split: validation
path: copa/validation-*
- split: test
path: copa/test-*
- config_name: multirc
data_files:
- split: train
path: multirc/train-*
- split: validation
path: multirc/validation-*
- split: test
path: multirc/test-*
- config_name: record
data_files:
- split: train
path: record/train-*
- split: validation
path: record/validation-*
- split: test
path: record/test-*
- config_name: rte
data_files:
- split: train
path: rte/train-*
- split: validation
path: rte/validation-*
- split: test
path: rte/test-*
- config_name: wic
data_files:
- split: train
path: wic/train-*
- split: validation
path: wic/validation-*
- split: test
path: wic/test-*
- config_name: wsc
data_files:
- split: train
path: wsc/train-*
- split: validation
path: wsc/validation-*
- split: test
path: wsc/test-*
---

# SuperGLUE Benchmark Datasets
This repository contains the SuperGLUE benchmark datasets uploaded to the Hugging Face Hub. Each dataset is available as a separate configuration, making it easy to load individual datasets using the datasets library.
## Datasets Included
The repository includes the following SuperGLUE datasets:
- BoolQ
- CB
- COPA
- MultiRC
- ReCoRD
- RTE
- WiC
- WSC
Each dataset has been preprocessed to ensure consistency across train, validation, and test splits. Missing keys in the test split have been filled with dummy values (type-aware) to match the features found in the training and validation splits.
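As a quick sanity check, you can confirm that the splits of a configuration expose identical features after loading. This is a minimal sketch assuming the `boolq` configuration (see the Usage section below for loading details):

```python
from datasets import load_dataset

# Confirm that train, validation, and test share an identical feature schema.
dataset = load_dataset("Hyukkyu/superglue", "boolq")
for name, split in dataset.items():
    assert split.features == dataset["train"].features, f"schema mismatch in {name}"
    print(name, sorted(split.features))
```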
## Usage
You can load any of the datasets with the Hugging Face `datasets` library. For example, to load the BoolQ dataset, run:

```python
from datasets import load_dataset

# Load the BoolQ dataset from the SuperGLUE benchmark
dataset = load_dataset("Hyukkyu/superglue", "boolq")

# Access the train, validation, and test splits
train_split = dataset["train"]
validation_split = dataset["validation"]
test_split = dataset["test"]

print(train_split)
```
Replace "boolq" with the desired configuration name (e.g., "cb", "copa", "multirc", etc.) to load other datasets.
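If you are unsure which configuration names are available, you can also query them programmatically (a small example; the expected names reflect the configurations declared in the metadata above):

```python
from datasets import get_dataset_config_names, load_dataset

# List every configuration shipped with this repository.
configs = get_dataset_config_names("Hyukkyu/superglue")
print(configs)  # should include: boolq, cb, copa, multirc, record, rte, wic, wsc

# Load a different task, e.g. CommitmentBank (cb).
cb = load_dataset("Hyukkyu/superglue", "cb")
print(cb["train"][0])
```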
## Data Processing
- **Schema consistency**: A recursive procedure was used to infer the schema from the train and validation splits and to fill in missing keys in the test split with dummy values. This ensures that all splits share the same features, preventing issues during model training or evaluation.
- **Type-aware dummy values**: Dummy values are inserted based on the expected type. For instance, missing boolean fields are filled with `False`, integer fields with `-1`, float fields with `-1.0`, and string fields with an empty string. A sketch of this procedure is shown below.
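The snippet below is an illustrative sketch of such a recursive, type-aware fill; the helper name `fill_missing` and the exact recursion are assumptions for demonstration, not the exact script used to build this repository.

```python
# Illustrative sketch of type-aware dummy filling; not the exact script used here.
DUMMY = {bool: False, int: -1, float: -1.0, str: ""}

def fill_missing(example, schema):
    """Recursively add keys that appear in `schema` but are absent from `example`."""
    if isinstance(schema, dict):
        filled = {} if example is None else dict(example)
        for key, sub_schema in schema.items():
            filled[key] = fill_missing(filled.get(key), sub_schema)
        return filled
    if isinstance(schema, list):
        items = example if isinstance(example, list) else []
        return [fill_missing(item, schema[0]) for item in items]
    # Leaf field: keep an existing value, otherwise insert the type-aware dummy.
    return example if example is not None else DUMMY[schema]

# Example: a test-split row that is missing the "label" field.
schema = {"question": str, "passage": str, "idx": int, "label": bool}
row = {"question": "is the sky blue", "passage": "The sky appears blue in daylight.", "idx": 0}
print(fill_missing(row, schema))  # "label" is filled with False
```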
## Citation
```bibtex
@article{wang2019superglue,
  title={SuperGLUE: A stickier benchmark for general-purpose language understanding systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
  journal={Advances in neural information processing systems},
  volume={32},
  year={2019}
}
```