"""The Indonesian Wiki Loader"""
import os
import re
import pandas as pd
import datasets
_CITATIONS = """\
@ONLINE{wikidump,
author = "Wikimedia Foundation",
title = "Wikimedia Downloads",
url = "https://dumps.wikimedia.org"}
@ONLINE{wikipedia-hf,
title = "Huggingface Wikipedia Dataset",
url = "https://huggingface.co/datasets/wikipedia"}"""
_REPO_URL = "https://huggingface.co/datasets/sabilmakbar/indonesian_wiki"
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_INDO_WIKI_RAW_DESCRIPTION = """\
The Indonesian Wikipedia Data Repository contains Wikipedia data from the Wikipedia HF dataset, focusing
on extraction of the Indonesian language and Indonesian local languages, some of which
are considered low-resource or extremely low-resource languages"""
_INDO_WIKI_DEDUP_DESCRIPTION = """\
This is a derivative of the Indonesian Wikipedia Data Repository which has already been pre-processed
by identifying and dropping duplicates to prevent boilerplate texts from occurring in the dataset"""
_AVAILABLE_DUMP_VERSION_DATE = ["20230901"]
_AVAILABLE_DUMP_LANGUAGES = ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "map-bms", "min", "ms", "nia", "su", "tet"]
def _construct_dataset_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
    # map the requested mode to its data folder and file-name suffix within this repo
    _mode_to_folder_mapper = {"dedup": "indo_wiki_dedup_data", "raw": "indo_wiki_raw_data"}
    _mode_to_file_suffix_mapper = {"dedup": "dataset_soft_hard_cleansed.csv", "raw": "raw_dataset.csv"}
    return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
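
# Illustrative only (a sketch, not executed by the loader): given the mappings above,
# _construct_dataset_from_dset_version_and_lang(date_ver="20230901", lang="id", mode="dedup")
# is expected to return "indo_wiki_dedup_data/wiki_id_20230901_dataset_soft_hard_cleansed.csv"
# (on POSIX systems), i.e. the relative path of one per-language CSV inside this repository.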
class IndoWikiConfig(datasets.BuilderConfig):
"""BuilderConfig for IndoWiki."""
def __init__(self, description: str=None, features: list=['url', 'title', 'text'],
data_url: str=None, date_stamp: str=None, lang: str=None,
mode = "dedup", **kwargs):
"""BuilderConfig for IndoWiki.
Args:
description: `string`, description of dataset
features: `list[string]`, list of the features that will appear in the
feature dict. Should not include "label" if it's a supervised.
data_url: `string`, url to download the data.
date_stamp: `string`, wikidump date_stamp for data available in repo.
lang: `string`, language to be loaded.
**kwargs: keyword arguments forwarded to super.
"""
        # validate configs
        if mode not in ["dedup", "raw"]:
            raise ValueError(f"Error occurred! Expected values are 'dedup' or 'raw' for arg `mode`, received {mode}!")
        if (lang is None or date_stamp is None) and data_url is None:
            raise ValueError("Expected `data_url` to be provided, or both `date_stamp` and `lang` to be provided!")

        _mode_to_desc_mapper = {"dedup": _INDO_WIKI_DEDUP_DESCRIPTION, "raw": _INDO_WIKI_RAW_DESCRIPTION}

        if date_stamp is not None and date_stamp not in _AVAILABLE_DUMP_VERSION_DATE:
            raise ValueError("Provided `date_stamp` dataset versioning doesn't match! Please re-check")
        if lang is not None and lang not in _AVAILABLE_DUMP_LANGUAGES:
            raise ValueError("Provided `lang` doesn't match! Please re-check")
        super(IndoWikiConfig, self).__init__(**kwargs)
        self.features = features

        # prioritize an explicitly passed data_url over one constructed from `date_stamp` and `lang`
        if data_url is not None:
            self.data_url = data_url
        else:
            self.data_url = _construct_dataset_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode)

        # auto-construct desc if not provided, otherwise keep the one passed in
        if description is None:
            self.description = _mode_to_desc_mapper[mode] + "\n" + f"From file path {self.data_url}"
        else:
            self.description = description

        # define citations & info URL internally in config class
        self.citation = _CITATIONS
        self.url = _REPO_URL
class IndoWiki(datasets.GeneratorBasedBuilder):
"""The IndoWiki Dataset."""
#if name isn't provided, will create a dataset of all languages
DEFAULT_CONFIG_NAME = "indowiki_dedup_all"
BUILDER_CONFIG_CLASS = IndoWikiConfig
_newest_data_raw_all_langs = [_construct_dataset_from_dset_version_and_lang(
date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="raw") for lang in _AVAILABLE_DUMP_LANGUAGES]
_newest_data_dedup_all_langs = [_construct_dataset_from_dset_version_and_lang(
date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="dedup") for lang in _AVAILABLE_DUMP_LANGUAGES]
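
    # Illustrative: each of the two lists above holds one relative CSV path per language, e.g. the
    # dedup list starts with "indo_wiki_dedup_data/wiki_ace_20230901_dataset_soft_hard_cleansed.csv"
    # for Acehnese ("ace"), the first entry of _AVAILABLE_DUMP_LANGUAGES.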
    BUILDER_CONFIGS = [
        IndoWikiConfig(
            name="indowiki_all",
            description=_INDO_WIKI_RAW_DESCRIPTION,
            data_url=_newest_data_raw_all_langs
        ),
        IndoWikiConfig(
            name="indowiki_dedup_all",
            description=_INDO_WIKI_DEDUP_DESCRIPTION,
            data_url=_newest_data_dedup_all_langs
        ),
        IndoWikiConfig(
            name="indowiki_dedup_id_only",
            lang="id",
            date_stamp="20230901"
        )
    ]
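
    # Note (illustrative sketch): requesting the "indowiki_dedup_id_only" config resolves, via
    # IndoWikiConfig above, to the single file
    # "indo_wiki_dedup_data/wiki_id_20230901_dataset_soft_hard_cleansed.csv" exposed as one "train"
    # split, while the two "*_all" configs carry a list of per-language files that become separate
    # splits in _split_generators below.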
    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
            license=_LICENSE)
    @staticmethod
    def _get_lang_name_from_data_url(data_url: str):
        # the lang code occurs after "wiki_" and before the date versioning (an 8-digit date)
        _list_folder_sep = data_url.split("/")[-1].split("_")
        _min_pos = min([pos for pos, data in enumerate(_list_folder_sep) if bool(re.search(r"\d{8}", data))])
        return re.sub(r"[^\w\.]", "_", "_".join(_list_folder_sep[1:_min_pos]))
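
    # Example (for clarity, not executed): for data_url
    # "indo_wiki_raw_data/wiki_map-bms_20230901_raw_dataset.csv", the filename splits into
    # ["wiki", "map-bms", "20230901", "raw", "dataset.csv"]; the first 8-digit token sits at
    # index 2, so the language part is "map-bms", normalized to the split name "map_bms".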
    def _split_generators(self, dl_manager):
        if self.config.name in ("indowiki_all", "indowiki_dedup_all"):
            file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
            dl_dir = dl_manager.download_and_extract(file_dict)

            return [
                datasets.SplitGenerator(
                    name=datasets.Split(split_name),
                    gen_kwargs={
                        "data_file": file_name
                    }
                )
                # dl_dir is a dictionary containing lang or split as keyname and file path as value
                for split_name, file_name in dl_dir.items()]
        else:
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": dl_dir
                    },
                )
            ]
    def _generate_examples(self, data_file):
        # read one per-language CSV and yield its rows as examples keyed by the "id" column
        pd_df = pd.read_csv(data_file)
        for _, row in pd_df.iterrows():
            example = {feature: row[feature] for feature in self.config.features}
            idx = row["id"]
            yield idx, example
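
# Usage sketch (assuming the script is loaded from the Hugging Face Hub repo named above; adjust
# to your environment, and note that recent `datasets` versions may require trust_remote_code=True):
#
#   from datasets import load_dataset
#   dset = load_dataset("sabilmakbar/indonesian_wiki", "indowiki_dedup_all")
#   print(dset["id"][0]["title"])  # "id" is the Indonesian-language split produced above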