Dataset: melayu_sabah
Languages: Sabah Malay
License: CC BY 4.0
Commit 942eccf (verified) by holylovenia · 1 parent: 5200b0c

Upload melayu_sabah.py with huggingface_hub

Files changed (1):
  1. melayu_sabah.py (+148, -0)
melayu_sabah.py ADDED
@@ -0,0 +1,148 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _DATASETNAME = "melayu_sabah"
+
+ _DESCRIPTION = """\
+ Korpus Variasi Bahasa Melayu: Sabah is a language corpus sourced from various folklores in Melayu Sabah dialect.
+ """
+
+ _CITATION = """\
+ @misc{melayusabah,
+     author = {Hiroki Nomoto},
+     title = {Melayu_Sabah},
+     year = {2020},
+     publisher = {GitHub},
+     journal = {GitHub repository},
+     howpublished = {\\url{https://github.com/matbahasa/Melayu_Sabah}},
+     commit = {90a46c8268412ccc1f29cdcbbd47354474f12d50}
+ }
+ """
+
+ _HOMEPAGE = "https://github.com/matbahasa/Melayu_Sabah"
+
+ _LANGUAGES = ["msi"]
+
+ _LICENSE = Licenses.CC_BY_4_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "sabah201701": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201701.txt",
+     "sabah201702": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201702.txt",
+     "sabah201901": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201901.txt",
+     "sabah201902": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201902.txt",
+     "sabah201903": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201903.txt",
+     "sabah201904": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201904.txt",
+     "sabah201905": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201905.txt",
+     "sabah201906": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201906.txt",
+     "sabah201907": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201907.txt",
+     "sabah201908": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201908.txt",
+     "sabah201909": "https://raw.githubusercontent.com/matbahasa/Melayu_Sabah/master/Sabah201909.txt",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MelayuSabah(datasets.GeneratorBasedBuilder):
+     """Korpus Variasi Bahasa Melayu:
+     Sabah is a language corpus sourced from various folklores in Melayu Sabah dialect."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_ssp":
+             features = schemas.self_supervised_pretraining.features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # Download all eleven subset files; they feed a single train split.
+         urls = list(_URLS.values())
+         data_path = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_path[0], "split": "train", "other_path": data_path[1:]},
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str, other_path: List) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         filepaths = [filepath] + other_path
+         data = []
+
+         # The first two files (Sabah201701, Sabah201702) are tab-separated;
+         # keep only the last field, which holds the utterance text.
+         for path in filepaths[:2]:
+             with open(path, "r", encoding="utf-8") as f:
+                 sentences = [line.rstrip().split("\t")[-1] for line in f]
+             data.append("\n".join(sentences))
+
+         # The remaining files are plain text; join their lines into one string
+         # per file so every example matches the declared string "text" feature.
+         for path in filepaths[2:]:
+             with open(path, "r", encoding="utf-8") as f:
+                 data.append("\n".join(line.rstrip() for line in f))
+
+         for idx, text in enumerate(data):
+             yield idx, {"id": str(idx), "text": text}
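
For quick verification, here is a minimal usage sketch of loading this script with the datasets library. It is an illustration under stated assumptions, not part of the commit: the local path to melayu_sabah.py and the use of trust_remote_code depend on your environment, and the seacrowd package the script imports must be installed. The config name melayu_sabah_source is the DEFAULT_CONFIG_NAME defined above; the SEACrowd variant would be melayu_sabah_seacrowd_ssp.

    import datasets

    # Assumed local path to the script uploaded in this commit.
    dset = datasets.load_dataset(
        "melayu_sabah.py",
        name="melayu_sabah_source",   # or "melayu_sabah_seacrowd_ssp"
        split="train",                # the only split the script defines
        trust_remote_code=True,
    )

    # The generator yields one example per source file, so the
    # train split should hold eleven long text records.
    print(len(dset))           # expected: 11
    print(dset[0]["text"][:80])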