Datasets:
Upload sea_wiki.py with huggingface_hub
Browse files- sea_wiki.py +216 -0
sea_wiki.py
ADDED
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
SEA Crowd Data Loader for SEA Wiki.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import json
|
6 |
+
from itertools import product
|
7 |
+
from typing import Dict, List, Tuple
|
8 |
+
|
9 |
+
import datasets
|
10 |
+
from datasets import load_dataset
|
11 |
+
from datasets.download.download_manager import DownloadManager
|
12 |
+
|
13 |
+
from seacrowd.sea_datasets.sea_wiki.lang_config import _LANG_CONFIG
|
14 |
+
from seacrowd.utils import schemas
|
15 |
+
from seacrowd.utils.configs import SEACrowdConfig
|
16 |
+
from seacrowd.utils.constants import Licenses, Tasks
|
17 |
+
|
18 |
+
# Citation block for the underlying Wikipedia dumps and the HF-hosted datasets.
# FIX: the two HF entries previously shared the BibTeX key "wikipedia-hf",
# which is invalid BibTeX (duplicate keys); the SEA entry now has its own key.
_CITATION = """
@ONLINE{wikidump,
    author = "Wikimedia Foundation",
    title = "Wikimedia Downloads",
    url = "https://dumps.wikimedia.org"}
@ONLINE{wikipedia-hf,
    title = "Huggingface Wikipedia Dataset",
    url = "https://huggingface.co/datasets/wikipedia"}
@ONLINE{sea-wikipedia-hf,
    title = "Huggingface SEA Wikipedia Dataset",
    url = "https://huggingface.co/datasets/sabilmakbar/sea_wiki"}
"""

logger = datasets.logging.get_logger(__name__)

# Data is pulled from the HF Hub, not from local files.
_LOCAL = False
# Language codes come from the shared per-language config table.
_LANGUAGES = list(_LANG_CONFIG.keys())

_DATASETNAME = "sea_wiki"
_DESCRIPTION = """\
SEA Lang & Local Langs Wikipedia Archives, dumped from WIkipedia HF and processed by boilerplate removal.
This dataset consists of URL of referred Wikipedia Article, its Title, and its Text Data (Article Contents).
"""

_HOMEPAGE = "https://huggingface.co/datasets/sabilmakbar/sea_wiki"
_LICENSE = Licenses.CC_BY_SA_4_0.value

# url won't be used since it will implement load_dataset method on HF URL provided
_URL = "https://huggingface.co/datasets/sabilmakbar/sea_wiki"

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING, Tasks.SUMMARIZATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

# Schema suffixes, index-aligned with `_SUPPORTED_TASKS`
# ("ssp" -> self-supervised pretraining, "t2t" -> summarization).
CONFIG_SUFFIXES_FOR_TASK = ["ssp", "t2t"]
|
53 |
+
|
54 |
+
|
55 |
+
def conform_init_config():
    """Validate module-level init constants before any config is built.

    Raises:
        AssertionError: when no languages are configured, or when the task
            suffix list is empty or its length differs from `_SUPPORTED_TASKS`.
    """
    if not _LANGUAGES:
        raise AssertionError("No Languages detected from config!")
    if len(CONFIG_SUFFIXES_FOR_TASK) != len(_SUPPORTED_TASKS):
        raise AssertionError("Config prefixes doesn't matched in terms of `len` with `_SUPPORTED_TASKS`!")
    if not CONFIG_SUFFIXES_FOR_TASK:
        raise AssertionError("Config prefixes and `_SUPPORTED_TASKS` have `len` of 0!")
|
63 |
+
|
64 |
+
|
65 |
+
conform_init_config()

# Pre-computed pairings used when instantiating configs:
# each schema prefix ("source"/"seacrowd") with its version string,
# and each task suffix with its task enum member.
SCHEMA_PREFIX_AND_VERSION_PAIRS = [("source", _SOURCE_VERSION), ("seacrowd", _SEACROWD_VERSION)]
CONFIG_NAME_AND_TASKS_PAIRS = list(zip(CONFIG_SUFFIXES_FOR_TASK, _SUPPORTED_TASKS))
|
70 |
+
|
71 |
+
|
72 |
+
def construct_configs(languages: list = None) -> List[SEACrowdConfig]:
    """
    Build the list of `SEACrowdConfig` objects for this dataset.

    input:
        languages (list, default None): languages for which configurations are
        constructed. When None, only the first entry of the language config is
        used, and task-suffix-less default configs are emitted as well.
    output:
        a list of `SEACrowdConfig` objects based on instantiated init variables
    """
    configs: List[SEACrowdConfig] = []

    # Locate the default task (Tasks.SUMMARIZATION) for the suffix-less configs.
    default_positions = [pos for pos, task in enumerate(_SUPPORTED_TASKS) if task == Tasks.SUMMARIZATION]

    # exactly one default task must exist
    if len(default_positions) != 1:
        raise AssertionError("Unexpected `_DEFAULT_TASK` #item!")

    _DEFAULT_CONFIG_SUFFIX, _DEFAULT_TASK = list(CONFIG_NAME_AND_TASKS_PAIRS)[default_positions[0]]

    if languages is None:
        # No language given: fall back to the first entry of `_LANGUAGES`.
        _languages = _LANGUAGES[0]

        # Default (suffix-less) config per schema prefix.
        for config_name_prefix, version in SCHEMA_PREFIX_AND_VERSION_PAIRS:
            configs.append(
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{config_name_prefix}",
                    version=datasets.Version(version),
                    description=f"{_DATASETNAME} {config_name_prefix} schema for default task arg ({_DEFAULT_TASK})",
                    schema=f"{config_name_prefix}_{_DEFAULT_CONFIG_SUFFIX}",
                    subset_id=_languages,
                )
            )

        # One config per (schema prefix, task suffix) combination.
        for (config_name_prefix, version), (config_name_suffix, task_obj) in product(SCHEMA_PREFIX_AND_VERSION_PAIRS, CONFIG_NAME_AND_TASKS_PAIRS):
            configs.append(
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{config_name_prefix}_{config_name_suffix}",
                    version=datasets.Version(version),
                    description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name}",
                    schema=f"{config_name_prefix}_{config_name_suffix}",
                    subset_id=_languages,
                )
            )
    else:
        # Language list given: one config per language x (prefix, suffix) pair.
        for _LANG in languages:
            for (config_name_prefix, version), (config_name_suffix, task_obj) in product(SCHEMA_PREFIX_AND_VERSION_PAIRS, CONFIG_NAME_AND_TASKS_PAIRS):
                configs.append(
                    SEACrowdConfig(
                        name=f"{_DATASETNAME}_{config_name_prefix}_{_LANG}_{config_name_suffix}",
                        version=datasets.Version(version),
                        description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
                        schema=f"{config_name_prefix}_{config_name_suffix}",
                        subset_id=_LANG,
                    )
                )

    return configs
|
137 |
+
|
138 |
+
|
139 |
+
class SEAWikiDataset(datasets.GeneratorBasedBuilder):
    """SEA Wiki dataset from https://huggingface.co/datasets/sabilmakbar/sea_wiki"""

    # get all schema w/o lang arg + get all schema w/ lang arg
    BUILDER_CONFIGS = construct_configs() + construct_configs(_LANGUAGES)

    def _info(self) -> datasets.DatasetInfo:
        """Resolve the feature schema from the active config's schema name."""
        schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")

        ssp_suffix, summarization_suffix = CONFIG_SUFFIXES_FOR_TASK[0], CONFIG_SUFFIXES_FOR_TASK[1]

        if ssp_suffix in schema_name:
            # self supervised training schema
            if "source" in schema_name:
                features = datasets.Features({"url": datasets.Value("string"), "text": datasets.Value("string")})
            elif "seacrowd" in schema_name:
                features = schemas.ssp_features
            else:
                raise ValueError(f"Unexpected schema received! {schema_name}")
        elif summarization_suffix in schema_name:
            # summarization schema
            if "source" in schema_name:
                features = datasets.Features({"url": datasets.Value("string"), "title": datasets.Value("string"), "text": datasets.Value("string")})
            elif "seacrowd" in schema_name:
                features = schemas.text2text_features
            else:
                raise ValueError(f"Unexpected schema received! {schema_name}")
        else:
            raise ValueError(f"Received unexpected config schema of {schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        # `dl_manager` is a placeholder: this loader wraps hf `load_dataset`
        # on `_URL` directly via `_load_hf_data_from_remote`, so nothing is
        # downloaded here and only a train split is declared.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _load_hf_data_from_remote(self):
        """Load the source dataset from the HF Hub repo referenced by `_URL`."""
        # the remote HF repo id is formed from the last two "/" segments of `_URL`
        repo_id = "/".join(_URL.split("/")[-2:])
        lang_args = _LANG_CONFIG[self.config.subset_id]["source_subset"]
        split = "train"

        logger.info(f"Loading dataset from remote HF {repo_id} with seacrowd lang args of {self.config.subset_id} and source lang args of {lang_args} and split args of {split}")
        return load_dataset(repo_id, lang=lang_args, split=split)

    def _generate_examples(self) -> Tuple[int, Dict]:
        """Yield (id, example) pairs shaped according to the active schema."""
        schema_name = self.config.schema
        rows = self._load_hf_data_from_remote()

        # The schema family is fixed per config, so resolve it before the loop.
        is_source = "source" in schema_name
        is_seacrowd = "seacrowd" in schema_name

        # iterate over datapoints and arrange the hf source schema per config
        for idx, row in enumerate(rows):
            if is_source:
                yield idx, {colname: row[colname] for colname in self.info.features}
            elif is_seacrowd and CONFIG_SUFFIXES_FOR_TASK[0] in schema_name:
                # ssp schema
                yield idx, {"id": idx, "text": row["text"]}
            elif is_seacrowd and CONFIG_SUFFIXES_FOR_TASK[1] in schema_name:
                # summarization schema
                yield idx, {"id": idx, "text_1": row["text"], "text_2": row["title"], "text_1_name": "document", "text_2_name": "title"}
            else:
                raise ValueError(f"Received unexpected config schema of {schema_name}!")
|