# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quasar: Datasets for Question Answering by Search and Reading"""
import gzip
import json
from collections import defaultdict

import datasets
_CITATION = """\
@article{dhingra2017quasar,
title={Quasar: Datasets for Question Answering by Search and Reading},
author={Dhingra, Bhuwan and Mazaitis, Kathryn and Cohen, William W},
journal={arXiv preprint arXiv:1707.03904},
year={2017}
}
"""
_UNKNOWN_RELATION = "UNK_RELATION"
_UNKNOWN_ANS_TYPE = "UNK_ANS_TYPE"
_UNKNOWN_GENRE = "UNK_GENRE"
_QUASAR_S = "quasar-s"
_QUASAR_T = "quasar-t"
_QUASAR_T_NPS = "quasar-t-nps"
_WHITE_SPACE = " "
_DESCRIPTION = """\
We present two new large-scale datasets aimed at evaluating systems designed to comprehend a natural language query and extract its answer from a large corpus of text. The Quasar-S dataset consists of 37000 cloze-style (fill-in-the-gap) queries constructed from definitions of software entity tags on the popular website Stack Overflow. The posts and comments on the website serve as the background corpus for answering the cloze questions. The Quasar-T dataset consists of 43000 open-domain trivia questions and their answers obtained from various internet sources. ClueWeb09 serves as the background corpus for extracting these answers. We pose these datasets as a challenge for two related subtasks of factoid Question Answering: (1) searching for relevant pieces of text that include the correct answer to a query, and (2) reading the retrieved text to answer the query.
"""
_HOMEPAGE = "https://github.com/bdhingra/quasar"
_DATA_URL = "http://curtis.ml.cmu.edu/datasets/quasar"
QUASAR_S_DESC = """\
Quasar-S consists of cloze-style questions over software entities. The following information is provided.
uid: Unique id
question: Text of the question
answer: Text of the answer
context_short: List[{confidence: float, content: str}]
context_long: The same as context_short, but from a different data source. See the paper for more info.
relation: For some questions in Quasar-S, the relation type between the head entity of the cloze question and the
answer entity is provided. For the other questions, this field takes the value "UNK_RELATION". For example,
[question]: jarjar -- jar jar links http : code.google.com p @placeholder is a utility that
makes it easy to repackage java libraries and embed them into your own distribution .,
[answer]: jarjar,
[relationship]: synonym
"""
QUASAR_T_DESC = """\
Quasar-T consists of open-domain trivia questions. The following information is provided.
uid: unique id
question: text of the question
answer: text of the answer
context_short: List[{confidence: float, content: str}]
context_long: The same as context_short, but from a different data source. See the paper for more info.
answer_type: Whether the answer is a date/time or a number. This is known for some answers; for the others, this field
takes the value "UNK_ANS_TYPE".
genre: Whether the question is from the genre of arts or math/science. This is known for some questions; for the others,
this field takes the value "UNK_GENRE".
"""
QUASAR_T_NPS_DESC = """\
Quasar-T consists of trivia questions. The following information is provided.
uid: unique id
question: text of the question
answer: text of the answer
context_short:
List[
{
confidence: float,
content: str,
content_tokens: List[str],
nps: List[{'content': str, 'start_token_id': int}]
}
]
Here, content_tokens is a whitespace tokenization of content. `nps` are contiguous chunks of NN*-tagged tokens from the
context, provided as candidate answers.
context_long: The same as context_short, but from a different data source. See the paper for more info.
answer_type: Whether the answer is a date/time or a number. This is known for some answers; for the others, this field
takes the value "UNK_ANS_TYPE".
genre: Whether the question is from the genre of arts or math/science. This is known for some questions; for the others,
this field takes the value "UNK_GENRE".
"""
class Quasar(datasets.GeneratorBasedBuilder):
"""MCTest: Machine comprehension test: http://research.microsoft.com/mct"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=_QUASAR_S,
version=VERSION,
description=QUASAR_S_DESC,
),
datasets.BuilderConfig(
name=_QUASAR_T,
version=VERSION,
description=QUASAR_T_DESC,
),
datasets.BuilderConfig(
name=_QUASAR_T_NPS,
version=VERSION,
description=QUASAR_T_NPS_DESC,
)
]
DEFAULT_CONFIG_NAME = _QUASAR_S
def _info(self):
features = datasets.Features(
{
"uid": datasets.Value("string"),
"question": datasets.Value("string"),
"context_short": datasets.Sequence(
dict(
{
"confidence": datasets.Value("float"),
"content": datasets.Value("string")
}
)),
"context_long": datasets.Sequence(
dict(
{
"confidence": datasets.Value("float"),
"content": datasets.Value("string")
}
)),
"tags": datasets.Sequence(datasets.Value("string")),
"answer": datasets.Value("string"),
}
)
        # For some questions in Quasar-S, the relation type between the head entity of the cloze question and the
        # answer entity is provided. For the other questions, we provide UNK_RELATION. Examples:
# [relationship]: component-of, [question]: putchar -- anything related to c or @placeholder functions putchar
# c or std : : putchar c++ ., [answer]: c++-standard-library
# [relationship]: synonym, [question]: jarjar -- jar jar links http : code.google.com p @placeholder is a
# utility that makes it easy to repackage java libraries and embed them into your own distribution .,
# [answer]: jarjar
# [relationship]: runs-on, [question]: web-audio -- web-audio is a javascript api providing low-level
# low-latency audio playback and manipulation functions in html5 capable @placeholder browsers ., [answer]: web
# [relationship]: used-with, [question]: audio-video-sync -- questions related to synchronization between audio
# and @placeholder during creation transmission reception and playback of content with both audio and video .,
# [answer]: video
if self.config.name == _QUASAR_S:
features.update({
"relation": datasets.Value("string")
})
elif self.config.name.startswith(_QUASAR_T):
features.update({
"answer_type": datasets.Value("string"),
"genre": datasets.Value("string")
})
        # (only for quasar-T): contiguous chunks of NN* tagged tokens from the
        # context are also provided as candidate answers.
        # Each line corresponds to the question in <split>_questions.json.gz,
        # in the format:
# {
# "nps": [
# ...
# [
# "aerosol spray",
# 69,
# 29
# ],
# ],
# "uid": "s3q41931"
# }
#
# Each element in "nps" is a list with three elements -
# [candidate, context_id, token_id]. The context_id is the index into the
# list of context documents, and token_id is the position of the start of
# the np in the context, when tokenized by white-space. Both are 0-based
# indices.
#
# If the correct answer is not detected as an NN* chunk we add it to the
# list of NPs above. The context_id and token_id are set to -1 in this
# case.
        # Since these candidates increase the dataset size considerably, they are exposed through a separate
        # configuration called quasar-t-nps.
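        # For illustration: under this configuration, _get_nps below regroups the raw
        # [candidate, context_id, token_id] triples per context, so the example triple
        # ["aerosol spray", 69, 29] ends up as {"content": "aerosol spray",
        # "start_token_id": 29} inside the "nps" of context entry 69.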
if self.config.name == _QUASAR_T_NPS:
for _type in ["short", "long"]:
features[f"context_{_type}"] = datasets.Sequence(
dict(
{
"confidence": datasets.Value("float"),
"content": datasets.Value("string"),
"content_tokens": datasets.Sequence(datasets.Value("string")),
"nps": datasets.Sequence(dict(
{
"content": datasets.Value("string"),
"start_token_id": datasets.Value("int32")
}
))
}
)
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
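        """Download the question, context, and annotation files for the selected configuration and define the
        train/dev/test splits."""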
paths = {}
phases = ["train", "dev", "test"]
if self.config.name == _QUASAR_S:
data_path = f"{_DATA_URL}/{_QUASAR_S}"
for phase in phases:
paths[phase] = {
"qa": dl_manager.download(f"{data_path}/questions/{phase}_questions.json.gz"),
"contexts_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_contexts.json.gz"),
"contexts_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_contexts.json.gz"),
}
paths["relations"] = dl_manager.download(f"{data_path}/relation_annotations.json")
elif self.config.name.startswith(_QUASAR_T):
data_path = f"{_DATA_URL}/{_QUASAR_T}"
for phase in phases:
paths[phase] = {
"qa": dl_manager.download(f"{data_path}/questions/{phase}_questions.json.gz"),
"contexts_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_contexts.json.gz"),
"contexts_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_contexts.json.gz"),
}
paths["answer_types"] = dl_manager.download(f"{data_path}/answer_annotations.json")
paths["genres"] = dl_manager.download(f"{data_path}/genre_annotations.json")
if self.config.name == _QUASAR_T_NPS:
for phase in phases:
paths[phase].update(
{
"nps_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_nps.json.gz"),
"nps_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_nps.json.gz"),
}
)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": paths, "phase": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": paths, "phase": "dev"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": paths, "phase": "test"},
),
]
@staticmethod
def _read_file(path):
"""
read a json.gz file
:param path:
:return:
"""
with gzip.open(path) as rf:
for line in rf:
yield eval(line)
@staticmethod
def _invert_dict(_dict):
"""
converts a dict of Dict[str, List[str]] to Dict[str, str], where each key in the new dict is one of the
values in the original dict
:param _dict:
:return:
"""
_d = {}
for k, v in _dict.items():
for _v in v:
_d[_v] = k
return _d
@staticmethod
def _get_nps(nps, context_sentences):
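        """
        Attach noun-phrase candidates to their context sentences.
        :param nps: list of [candidate, context_id, token_id] triples (0-based indices, -1 if not found)
        :param context_sentences: list of {confidence, content} dicts
        :return: the same sentences, enriched with content_tokens and their nps
        """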
np_sentence_dict = defaultdict(list)
for candidate, context_id, token_id in nps:
np_sentence_dict[context_id].append((candidate, token_id))
_context_sentences = [{
"confidence": context_sentence["confidence"],
"content": context_sentence["content"],
"content_tokens": context_sentence["content"].split(_WHITE_SPACE),
"nps": [{"content": np[0], "start_token_id": np[1]} for np in np_sentence_dict[index]]
} for index, context_sentence in enumerate(context_sentences)]
return _context_sentences
@staticmethod
def _get_base_datum(qa, context_long, context_short):
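        """Build the fields shared by all configurations from one question and its short/long contexts,
        checking via the uid that the contexts belong to the question."""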
uid = qa["uid"]
assert context_long["uid"] == uid
assert context_short["uid"] == uid
context_long = [{"confidence": context[0], "content": context[1]} for context in context_long["contexts"]]
context_short = [{"confidence": context[0], "content": context[1]} for context in context_short["contexts"]]
return {
"uid": qa["uid"],
"question": qa["question"],
"context_short": context_short,
"context_long": context_long,
"tags": qa["tags"],
"answer": qa["answer"]
}
def _generate_examples(self, filepath, phase):
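        """Yield (uid, example) pairs for one split, joining questions with their contexts (and, depending on
        the configuration, relation/answer-type/genre annotations and noun-phrase candidates) by line order."""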
qas = self._read_file(filepath[phase]["qa"])
contexts_long = self._read_file(filepath[phase]["contexts_long"])
contexts_short = self._read_file(filepath[phase]["contexts_short"])
if self.config.name == _QUASAR_S:
            with open(filepath["relations"]) as f:
                relations = self._invert_dict(json.load(f))
for qa, context_long, context_short in zip(qas, contexts_long, contexts_short):
datum = self._get_base_datum(qa, context_long, context_short)
datum.update({"relation": relations.get(qa["uid"], _UNKNOWN_RELATION)})
yield qa["uid"], datum
elif self.config.name == _QUASAR_T:
            with open(filepath["answer_types"]) as f:
                answer_types = self._invert_dict(json.load(f))
            with open(filepath["genres"]) as f:
                genres = self._invert_dict(json.load(f))
for qa, context_long, context_short in zip(qas, contexts_long, contexts_short):
datum = self._get_base_datum(qa, context_long, context_short)
datum.update({"answer_type": answer_types.get(qa["uid"], _UNKNOWN_ANS_TYPE)})
datum.update({"genre": genres.get(qa["uid"], _UNKNOWN_GENRE)})
yield qa["uid"], datum
elif self.config.name == _QUASAR_T_NPS:
            with open(filepath["answer_types"]) as f:
                answer_types = self._invert_dict(json.load(f))
            with open(filepath["genres"]) as f:
                genres = self._invert_dict(json.load(f))
nps_long = self._read_file(filepath[phase]["nps_long"])
nps_short = self._read_file(filepath[phase]["nps_short"])
            for qa, context_long, context_short, np_long, np_short in zip(
                qas, contexts_long, contexts_short, nps_long, nps_short
            ):
datum = self._get_base_datum(qa, context_long, context_short)
assert np_long["uid"] == qa["uid"]
assert np_short["uid"] == qa["uid"]
datum.update({"answer_type": answer_types.get(qa["uid"], _UNKNOWN_ANS_TYPE)})
datum.update({"genre": genres.get(qa["uid"], _UNKNOWN_GENRE)})
datum["context_long"] = self._get_nps(np_long["nps"], datum["context_long"])
datum["context_short"] = self._get_nps(np_short["nps"], datum["context_short"])
yield qa["uid"], datum
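

# Example usage, as a sketch: this assumes the script is saved locally as
# quasar.py, that the datasets version in use still supports loading scripts,
# and that the CMU download URLs above are still reachable.
#
#   import datasets
#   ds = datasets.load_dataset("./quasar.py", "quasar-t")
#   print(ds["train"][0]["question"], ds["train"][0]["answer"])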