Languages: Turkish
e-budur committed
Commit 5cf6fa6 · 1 Parent(s): c4a2037

Create squad_tr.py

Files changed (1):
  1. squad_tr.py +206 -0
squad_tr.py ADDED
@@ -0,0 +1,206 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This file is based on the dataset loader script for the original SQuAD2.0
+# dataset:
+#
+# https://huggingface.co/datasets/squad_v2
+
+# Lint as: python3
+"""SQuAD-TR Dataset"""
+
+
+import itertools
+import json
+
+import datasets
+from datasets.tasks import QuestionAnsweringExtractive
+
+
+logger = datasets.logging.get_logger(__name__)
+
+_HOMEPAGE = "https://github.com/boun-tabi/squad-tr"
+
+_CITATION = """\
+@article{
+    budur2023squadtr,
+    title={Building Efficient and Effective OpenQA Systems for Low-Resource Languages},
+    author={todo},
+    journal={todo},
+    year={2023}
+}
+"""
+
+_DESCRIPTION = """\
+SQuAD-TR is a machine-translated version of the original SQuAD2.0 dataset into
+Turkish.
+"""
+
+_VERSION = "1.0.0"
+
+_DATA_URL = _HOMEPAGE + "/raw/beta/data"
+_DATA_URLS = {
+    "default": {
+        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}.json.gz",
+        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}.json.gz",
+    },
+    "excluded": {
+        "train": f"{_DATA_URL}/squad-tr-train-v{_VERSION}-excluded.json.gz",
+        "dev": f"{_DATA_URL}/squad-tr-dev-v{_VERSION}-excluded.json.gz",
+    },
+}
+
+
+class SquadTRConfig(datasets.BuilderConfig):
+    """BuilderConfig for SQuAD-TR."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for SQuAD-TR.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(SquadTRConfig, self).__init__(**kwargs)
+
+
+class SquadTR(datasets.GeneratorBasedBuilder):
+    """SQuAD-TR: machine-translated version of the original SQuAD2.0 dataset into Turkish."""
+
+    VERSION = datasets.Version(_VERSION)
+
+    BUILDER_CONFIGS = [
+        SquadTRConfig(
+            name="default",
+            version=datasets.Version(_VERSION),
+            description="SQuAD-TR default version.",
+        ),
+        SquadTRConfig(
+            name="excluded",
+            version=datasets.Version(_VERSION),
+            description="SQuAD-TR excluded version.",
+        ),
+        SquadTRConfig(
+            name="openqa",
+            version=datasets.Version(_VERSION),
+            description="SQuAD-TR OpenQA version.",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "default"
+
+    def _info(self):
+        # We change the contents of the "answers" field based on the selected
+        # configuration: the "answer_start" field is excluded for the
+        # "excluded" and "openqa" configurations.
+        if self.config.name in ["excluded", "openqa"]:
+            answers_feature = datasets.features.Sequence({
+                "text": datasets.Value("string"),
+            })
+        else:
+            answers_feature = datasets.features.Sequence({
+                "text": datasets.Value("string"),
+                "answer_start": datasets.Value("int32"),
+            })
+
+        # Constructing our dataset features.
+        features = datasets.Features({
+            "id": datasets.Value("string"),
+            "title": datasets.Value("string"),
+            "context": datasets.Value("string"),
+            "question": datasets.Value("string"),
+            "answers": answers_feature,
+        })
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            task_templates=[
+                QuestionAnsweringExtractive(
+                    question_column="question", context_column="context", answers_column="answers"
+                )
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        # If the selected configuration is "default" or "excluded", we load
+        # the files directly from the URLs in _DATA_URLS. For the "openqa"
+        # configuration, we combine the data points from the two files used
+        # in the "default" and "excluded" configurations.
+        if self.config.name == "openqa":
+            default_files = dl_manager.download_and_extract(_DATA_URLS["default"])
+            excluded_files = dl_manager.download_and_extract(_DATA_URLS["excluded"])
+            train_file_paths = [default_files["train"], excluded_files["train"]]
+            dev_file_paths = [default_files["dev"], excluded_files["dev"]]
+
+            return [
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath_list": train_file_paths}),
+                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath_list": dev_file_paths}),
+            ]
+        else:
+            config_urls = _DATA_URLS[self.config.name]
+            downloaded_files = dl_manager.download_and_extract(config_urls)
+
+            return [
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            ]
+
+    def _generate_examples(self, filepath=None, filepath_list=None):
+        """This function returns the examples in the raw (text) form."""
+        assert filepath or filepath_list
+        if filepath:
+            filepath_list = [filepath]
+
+        # Chaining the generators for the different filepaths and re-keying
+        # the examples, so that keys remain unique when several files are
+        # combined (as in the "openqa" configuration); otherwise each file's
+        # keys would restart at 0 and collide.
+        generators = [self._generate_examples_from_filepath(f) for f in filepath_list]
+        for key, (_, datapoint) in enumerate(itertools.chain.from_iterable(generators)):
+            yield key, datapoint
+
+    def _generate_examples_from_filepath(self, filepath):
+        logger.info("generating examples from = %s", filepath)
+        key = 0
+        with open(filepath, encoding="utf-8") as f:
+            squad = json.load(f)
+            for article in squad["data"]:
+                title = article.get("title", "")
+                for paragraph in article["paragraphs"]:
+                    context = paragraph["context"]  # Do not strip leading blank spaces GH-2585
+                    for qa in paragraph["qas"]:
+                        # Constructing our answers dictionary. Note that it
+                        # does not include the "answer_start" field in the
+                        # "excluded" and "openqa" configurations.
+                        answers_dictionary = {
+                            "text": [answer["text"] for answer in qa["answers"]],
+                        }
+                        if self.config.name not in ["excluded", "openqa"]:
+                            answers_dictionary["answer_start"] = [answer["answer_start"] for answer in qa["answers"]]
+
+                        # Constructing our datapoint
+                        datapoint = {
+                            "title": title,
+                            "context": context,
+                            "question": qa["question"],
+                            "id": qa["id"],
+                            "answers": answers_dictionary,
+                        }
+
+                        # Features currently used are "context", "question", and "answers".
+                        # Others are extracted here for ease of future expansion.
+                        yield key, datapoint
+                        key += 1
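
For reference, a minimal usage sketch follows. The Hub dataset id "boun-tabi/squad_tr" is an assumption (substitute the path this script is actually published under); the config names come from BUILDER_CONFIGS above, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based loaders.

from datasets import load_dataset

# "default": answers carry both "text" and "answer_start" offsets.
squad_tr = load_dataset("boun-tabi/squad_tr", "default")  # hypothetical Hub id

# "openqa": merges the default and excluded files and drops "answer_start".
squad_tr_openqa = load_dataset("boun-tabi/squad_tr", "openqa")

print(squad_tr["train"][0])

Each example follows the schema declared in _info(): "id", "title", "context", "question", and "answers", where "answers" is a sequence of "text" strings (plus "answer_start" offsets in the "default" configuration). The input files themselves are expected in the standard SQuAD JSON layout ({"data": [{"title", "paragraphs": [{"context", "qas": [...]}]}]}), which is exactly the structure _generate_examples_from_filepath walks.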