Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Persian
Size: 1K - 10K
ArXiv: 2012.06154
License: cc-by-nc-sa-4.0
Convert dataset to Parquet (#3)

- Convert dataset to Parquet (01fdde27e129e5ea4e89f98a4ba62917e5ad11b2)
- Delete loading script (b023a2bc2f6e9345b2f741026a16b30b5786eee8)
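A minimal sketch of what this conversion amounts to, assuming a datasets version that still supports script-based loading and that the deleted loading script is saved locally; the shard naming mirrors the files added in this commit, and this is an illustration, not necessarily the tool the Hub used:

from datasets import load_dataset

# Build the dataset with the old loading script, then write one
# Parquet shard per split (matching the files added below).
ds = load_dataset("parsinlu_reading_comprehension.py", "parsinlu-repo")
for split, data in ds.items():
    data.to_parquet(f"parsinlu-repo/{split}-00000-of-00001.parquet")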
README.md CHANGED

@@ -17,9 +17,9 @@ task_categories:
 - question-answering
 task_ids:
 - extractive-qa
-paperswithcode_id: null
 pretty_name: PersiNLU (Reading Comprehension)
 dataset_info:
+  config_name: parsinlu-repo
   features:
   - name: question
     dtype: string
@@ -33,10 +33,9 @@ dataset_info:
     dtype: int32
   - name: answer_text
     dtype: string
-  config_name: parsinlu-repo
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 747655
     num_examples: 600
   - name: test
     num_bytes: 674711
@@ -44,8 +43,18 @@ dataset_info:
   - name: validation
     num_bytes: 163161
     num_examples: 125
-  download_size:
+  download_size: 777432
   dataset_size: 1585527
+configs:
+- config_name: parsinlu-repo
+  data_files:
+  - split: train
+    path: parsinlu-repo/train-*
+  - split: test
+    path: parsinlu-repo/test-*
+  - split: validation
+    path: parsinlu-repo/validation-*
+  default: true
 ---
 
 # Dataset Card for PersiNLU (Reading Comprehension)
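With the configs/data_files mapping above in place, consumers load the Parquet shards directly and no loading script runs. A usage sketch; the persiannlp/parsinlu_reading_comprehension repo id is inferred from the script filename and is not stated on this page:

from datasets import load_dataset

# Resolves parsinlu-repo/train-*, test-*, validation-* via the YAML above.
ds = load_dataset("persiannlp/parsinlu_reading_comprehension", "parsinlu-repo")
print(ds)              # DatasetDict with train (600), test, validation (125) splits
print(ds["train"][0])  # keys: question, url, context, answers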
parsinlu-repo/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f35779d9faf2121f5953df218d49fa3f69cd3d52f3d024a60d2818d4378bf144
+size 322212
parsinlu-repo/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbe7e80fcc73c488a0c5839ee9b4b16d12bc40d3295a98f71897113b6aeb7415
+size 363239
parsinlu-repo/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cb86eed5f155eef8bf7428d2680da48be30e0ac737c353e7f30729fed260ec5
+size 91981
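The three blobs above are git-LFS pointer files (version, oid, size), not the Parquet bytes themselves; the actual shards live in LFS storage. A sketch of reading one shard without the datasets library, assuming the same repo id as above and huggingface_hub installed so pandas can resolve hf:// paths:

import pandas as pd

# Fetch a single Parquet shard straight from the Hub.
df = pd.read_parquet(
    "hf://datasets/persiannlp/parsinlu_reading_comprehension/parsinlu-repo/train-00000-of-00001.parquet"
)
print(df.shape)          # expect (600, 4) per the split metadata in the README
print(list(df.columns))  # question, url, context, answers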
parsinlu_reading_comprehension.py DELETED

@@ -1,141 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""ParsiNLU Persian reading comprehension task"""
-
-
-import json
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-_CITATION = """\
-@article{huggingface:dataset,
-    title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},
-    authors = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},
-    year = {2020},
-    journal = {arXiv e-prints},
-    eprint = {2012.06154},
-}
-"""
-
-# You can copy an official description
-_DESCRIPTION = """\
-A Persian reading comprehension task (generating an answer, given a question and a context paragraph).
-The questions are mined using Google auto-complete, their answers and the corresponding evidence documents are manually annotated by native speakers.
-"""
-
-_HOMEPAGE = "https://github.com/persiannlp/parsinlu/"
-
-_LICENSE = "CC BY-NC-SA 4.0"
-
-_URL = "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/reading_comprehension/"
-_URLs = {
-    "train": _URL + "train.jsonl",
-    "dev": _URL + "dev.jsonl",
-    "test": _URL + "eval.jsonl",
-}
-
-
-class ParsinluReadingComprehension(datasets.GeneratorBasedBuilder):
-    """ParsiNLU Persian reading comprehension task."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="parsinlu-repo", version=VERSION, description="ParsiNLU repository: reading-comprehension"
-        ),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "question": datasets.Value("string"),
-                "url": datasets.Value("string"),
-                "context": datasets.Value("string"),
-                "answers": datasets.features.Sequence(
-                    {
-                        "answer_start": datasets.Value("int32"),
-                        "answer_text": datasets.Value("string"),
-                    }
-                ),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["train"],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["dev"],
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        logger.info("generating examples from = %s", filepath)
-
-        def get_answer_index(passage, answer):
-            return passage.index(answer) if answer in passage else -1
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                answer = data["answers"]
-                if type(answer[0]) == str:
-                    answer = [{"answer_start": get_answer_index(data["passage"], x), "answer_text": x} for x in answer]
-                else:
-                    answer = [{"answer_start": x[0], "answer_text": x[1]} for x in answer]
-                yield id_, {
-                    "question": data["question"],
-                    "url": str(data["url"]),
-                    "context": data["passage"],
-                    "answers": answer,
-                }