holylovenia committed on
Commit 3ec5ac7 · 1 Parent(s): ca1c72a

Upload kopi_cc_news.py with huggingface_hub

Files changed (1)
  1. kopi_cc_news.py +139 -0
kopi_cc_news.py ADDED
@@ -0,0 +1,139 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ KoPI-CC_NEWS corpus
+
+ [nusantara_schema_name] = ssp
+ """
+
+ import json
+ from typing import List
+
+ import datasets
+ import zstandard as zstd
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
+                                        DEFAULT_SOURCE_VIEW_NAME, Tasks)
+
+ _DATASETNAME = "kopi_cc_news"
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+ _UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+ _URL = "https://commoncrawl.org/"
+ _CITATION = """\
+
+ """
+
+ _DESCRIPTION = """\
+ KoPI (Korpus Perayapan Indonesia)-CC_News is an Indonesian-only extract of the CC-NEWS Common Crawl snapshots from 2016 to July 2022. Each snapshot is extracted with warcio and trafilatura and filtered with fastText.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI-CC_News"
+
+ _LICENSE = "CC0"
+
+ _URLS = "https://huggingface.co/datasets/munggok/KoPI-CC_News/resolve/main/data/cc_news_{year}_id.jsonl.zst"
+
+ _YEAR = ["2016", "2017", "2018", "2019", "2020", "2021", "2022"]
+
+ _ALL_CONFIG = _YEAR + ["all"]
+
+ _SOURCE_VERSION = "2018.12.01"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ def nusantara_config_constructor(year, schema, version):
+     """Construct NusantaraConfig"""
+     if schema not in ("source", "nusantara_ssp"):
+         raise ValueError(f"Invalid schema: {schema}")
+
+     if year == "":
+         raise ValueError(f"Snapshot is required. Choose one of these snapshots: {_ALL_CONFIG}.")
+     elif year in _ALL_CONFIG:
+         return NusantaraConfig(
+             name=f"{_DATASETNAME}_{year}_{schema}",
+             version=datasets.Version(version),
+             description=f"KoPI-CC_News with {schema} schema for {year}",
+             schema=schema,
+             subset_id="kopi_cc_news",
+         )
+     else:
+         raise ValueError(f"Invalid year: {year}. Choose one of these snapshots: {_ALL_CONFIG}.")
+
+
+ class KoPICCNEWS(datasets.GeneratorBasedBuilder):
+     """KoPI-CC_News dataset builder supporting the source and nusantara_ssp schemas."""
+
+     DEFAULT_CONFIG_NAME = "2016"
+
+     BUILDER_CONFIGS = [nusantara_config_constructor(sn, "source", _SOURCE_VERSION) for sn in _ALL_CONFIG] + [nusantara_config_constructor(sn, "nusantara_ssp", _NUSANTARA_VERSION) for sn in _ALL_CONFIG]
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "timestamp": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "meta": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_ssp":
+             features = schemas.self_supervised_pretraining.features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         name = self.config.name.replace("_" + self.config.schema, "")
+         name = name.replace(_DATASETNAME + "_", "")
+         if name == "all":
+             urls = [_URLS.format(year=m) for m in _YEAR]
+         else:
+             urls = [_URLS.format(year=name)]
+         path = dl_manager.download(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": path, "split": "train"},
+             ),
+         ]
+
+     def _generate_examples(self, filepaths, split):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             # Each shard is a zstandard-compressed JSON Lines file; stream-decompress and parse it line by line.
+             with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         if self.config.schema == "nusantara_ssp":
+                             yield id_, {"id": str(id_), "text": example["text"]}
+                             id_ += 1
+                         else:
+                             yield id_, {"text": example["text"], "url": example["url"], "timestamp": example["timestamp"], "meta": example["meta"]}
+                             id_ += 1
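For reference, a minimal usage sketch under stated assumptions: the uploaded script sits in the working directory, nusacrowd and zstandard are installed, and the installed datasets release still supports script-based loading (recent versions gate it behind trust_remote_code). The config name follows the {_DATASETNAME}_{year}_{schema} pattern defined above; everything else here is illustrative, not part of the commit.

import datasets

# Load the 2016 snapshot with the source schema from the local script (hypothetical path).
dset = datasets.load_dataset(
    "kopi_cc_news.py",
    name="kopi_cc_news_2016_source",
    trust_remote_code=True,  # required by newer `datasets` releases before executing dataset scripts
)

# Only a train split is generated; inspect one record's URL field.
print(dset["train"][0]["url"])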