khalidalt committed on
Commit
89d5a24
·
1 Parent(s): 3853494

add subscene script

Browse files
Files changed (1) hide show
  1. subscene.py +144 -0
subscene.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """TODO: Add a description here."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+
22
+ import datasets
23
+ import gzip
24
+
25
+ # TODO: Add BibTeX citation
26
+ # Find for instance the citation on arxiv or on the dataset repo/website
27
+ _CITATION = """\
28
+ @InProceedings{huggingface:dataset,
29
+ title = {A great new dataset},
30
+ author={huggingface, Inc.
31
+ },
32
+ year={2020}
33
+ }
34
+ """
35
+
36
+ # TODO: Add description of the dataset here
37
+ # You can copy an official description
38
+ _DESCRIPTION = """\
39
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
40
+ """
41
+
42
+ # TODO: Add a link to an official homepage for the dataset here
43
+ _HOMEPAGE = ""
44
+
45
+ # TODO: Add the licence for the dataset here if you can find it
46
+ _LICENSE = ""
47
+
48
+ # TODO: Add link to the official dataset URLs here
49
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
50
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
51
+ _URLS = "https://huggingface.co/datasets/khalidalt/subscene/resolve/main/train/{Lang}_subscene_{split}_{index}.json.gz"
52
+
53
+ _N_FILES_PER_SPLIT = {
54
+ 'arabic': {'train':33 },
55
+ }
56
+
57
+ _LangID = ['arabic']
58
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
59
+
60
class SubsceneConfig(datasets.BuilderConfig):
    """Builder config for the Subscene dataset.

    ``subset`` selects a single language, or the literal string ``"all"``
    to include every language listed in ``_LangID``.
    """

    def __init__(self, subset, **kwargs):
        super().__init__(**kwargs)
        # Normalise to a list of language names so downstream code can
        # iterate uniformly regardless of how the config was created.
        self.subset = _LangID if subset == "all" else [subset]
71
+
72
class Subscene(datasets.GeneratorBasedBuilder):
    """Subscene subtitle corpus, with one builder config per language.

    Each example is a JSON object carrying a "text" field (subtitle
    content) and a "meta" field (serialized metadata string), streamed
    from gzip-compressed JSON-lines shards hosted on the Hugging Face Hub.
    """

    VERSION = datasets.Version("1.1.0")
    # FIX: the datasets library looks up `BUILDER_CONFIG_CLASS` (singular
    # CONFIG); the previous `BUILDER_CONFIGS_CLASS` name was silently
    # ignored, so user-supplied config kwargs never used SubsceneConfig.
    BUILDER_CONFIG_CLASS = SubsceneConfig
    BUILDER_CONFIGS = [
        SubsceneConfig(
            name=subset,
            subset=subset,
            version=datasets.Version("1.1.0", ""),
            description="",
        )
        for subset in _LangID
    ]

    def _info(self):
        """Return dataset metadata and the feature schema of one example."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "meta": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Enumerate and download every shard URL for each split.

        Only the train split is wired up for now; add 'validation' to the
        loop (and a matching SplitGenerator) once validation shards are
        published.
        """
        data_urls = {}
        for split in ["train"]:  # TODO: add 'validation' when available
            data_urls[split] = [
                _URLS.format(
                    Lang=subset,
                    # NOTE(review): `split` is always 'train' here, so this
                    # expression always yields '' and the shard name contains
                    # a double underscore (e.g. arabic_subscene__0.json.gz).
                    # Preserved as-is because it appears to match the hosted
                    # file names -- confirm against the Hub before changing.
                    split="validation" if split == "_val" else "",
                    index=i,
                )
                for subset in self.config.subset
                for i in range(_N_FILES_PER_SPLIT[subset][split])
            ]

        train_downloaded_files = dl_manager.download(data_urls["train"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in
    # `_split_generators`
    def _generate_examples(self, filepaths):
        """Yield (id, example) pairs from gzip-compressed JSON-lines shards.

        Ids are assigned sequentially across all shards so they stay
        unique within the split.
        """
        id_ = 0
        for filepath in filepaths:
            # gzip.open accepts a path directly and opens it itself; the
            # old double-open (`gzip.open(open(p, "rb"), ...)`) added an
            # extra file object for no benefit.
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for row in f:
                    if row:
                        data = json.loads(row)
                        yield id_, data
                        id_ += 1