Commit 4baed46 · Parent(s): 9b698a8

Update loading script v1.2

corpus-carolina.py CHANGED (+30 -24)
@@ -14,6 +14,7 @@
 # limitations under the License.
 """Carolina Corpus"""
 
+from collections import defaultdict
 from lxml import etree
 import os
 import datasets
@@ -30,7 +31,7 @@ robust volume of texts of varied typology in contemporary Brazilian Portuguese
 
 
 _CITATION = r"""
-@misc{corpusCarolinaV1.
+@misc{corpusCarolinaV1.2,
     title={
         Carolina:
             The Open Corpus for Linguistics and Artificial Intelligence},
@@ -52,7 +53,7 @@ _CITATION = r"""
         Palma, Mayara Feliciano},
     howpublished={\url{https://sites.usp.br/corpuscarolina/corpus}},
     year={2022},
-    note={Version 1.
+    note={Version 1.2 (Ada)},
 }
 """
 
@@ -81,13 +82,13 @@ def _taxonomies():
         leg="legislative branch",
         pub="public domain works",
         soc="social media",
-        uni="
+        uni="university domains",
         wik="wikis",
     )
 
 
-_VERSION = "1.
-_CORPUS_URL = "corpus/{
+_VERSION = "1.2.0"
+_CORPUS_URL = "corpus/{tax}/"
 _CHECKSUM_FNAME = _CORPUS_URL + "checksum.sha256"
 
 
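For context, the two new templates compose per taxonomy. A minimal sketch of the expansion, assuming only what the diff shows (the repository base URL is resolved elsewhere by the download manager):

    _CORPUS_URL = "corpus/{tax}/"
    _CHECKSUM_FNAME = _CORPUS_URL + "checksum.sha256"

    print(_CORPUS_URL.format(tax="wik"))       # corpus/wik/
    print(_CHECKSUM_FNAME.format(tax="wik"))   # corpus/wik/checksum.sha256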
@@ -140,6 +141,7 @@ class Carolina(datasets.GeneratorBasedBuilder):
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
+            supervised_keys=None,
             homepage=_HOMEPAGE,
             citation=_CITATION,
             features=features,
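The added `supervised_keys=None` simply declares that the corpus defines no default (input, label) pair. A hedged sketch of the surrounding `_info` method; the `features` construction is not part of this diff and is assumed from the `meta`/`text` examples yielded below:

    def _info(self):
        # Assumed schema: both fields are plain strings.
        features = datasets.Features({
            "meta": datasets.Value("string"),
            "text": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            supervised_keys=None,   # no default input/label pair
            homepage=_HOMEPAGE,
            citation=_CITATION,
            features=features,
        )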
@@ -153,23 +155,21 @@ class Carolina(datasets.GeneratorBasedBuilder):
         else:
             taxonomies = [_taxonomies()[self.config.taxonomy]]
 
-        for …
-            …
-            with open(checksum_path, encoding="utf-8") as cfile:
                 for line in cfile:
-                    …
-                    zip_urls[fname] = zip_url  # xml -> zip url
+        # download checksum files
+        checksum_urls = {t: _CHECKSUM_FNAME.format(tax=t) for t in taxonomies}
+        checksum_paths = dl_manager.download(checksum_urls)
+
+        # prepare xml file name and zip urls
+        zip_urls = dict()  # xml
+        for tax, cpath in checksum_paths.items():
+            tax_path = _CORPUS_URL.format(tax=tax)
+            with open(cpath, encoding="utf-8") as cfile:
+                for line in cfile:
+                    xml_tax_path = line.split()[1]  # xml file inside taxonomy
+                    zip_fname = xml_tax_path + ".zip"  # zip file inside taxonomy
+                    xml_fname = xml_tax_path.split('/')[-1]  # xml file name only
+                    zip_fpath = os.path.join(tax_path, zip_fname)  # path inside corpus
+                    zip_urls[xml_fname] = zip_fpath  # xml file -> zip path
 
         # extractions are made in cache folders and
         # the path returned is the folder path, not the
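The new download logic derives one zip path per XML file from each taxonomy's checksum manifest, which is assumed to hold one `<digest> <relative xml path>` pair per line. A standalone sketch of the same parsing, with a made-up manifest entry:

    import os

    tax_path = "corpus/{tax}/".format(tax="wik")   # _CORPUS_URL expansion
    line = "9f86d081884c7d65 dat/wik_sample.xml"   # hypothetical manifest line

    xml_tax_path = line.split()[1]                 # 'dat/wik_sample.xml'
    zip_fname = xml_tax_path + ".zip"              # 'dat/wik_sample.xml.zip'
    xml_fname = xml_tax_path.split('/')[-1]        # 'wik_sample.xml'
    zip_fpath = os.path.join(tax_path, zip_fname)  # 'corpus/wik/dat/wik_sample.xml.zip'

    print({xml_fname: zip_fpath})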
@@ -200,11 +200,17 @@ class Carolina(datasets.GeneratorBasedBuilder):
         # parse xml file
         for _, tei in etree.iterparse(path, **parser_params):
             header = tei.find(f"{TEI_NS}teiHeader")
+
+            meta = etree.tostring(
+                header, encoding="utf-8").decode("utf-8")
+            text = ' '.join([e.text
+                             for e in tei.findall(f".//{TEI_NS}body/{TEI_NS}p")
+                             if e.text is not None
+                             ])
 
             example = {
-                "meta": …
-                "text": tei.find(f".//{TEI_NS}body/{TEI_NS}p").text
+                "meta": meta,
+                "text": text
             }
             yield _key, example
             _key += 1
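The rewritten extraction serializes the whole `teiHeader` element and joins the text of every non-empty `<p>` under `<body>`, where the old code read only the first paragraph. A self-contained sketch on a toy TEI document (the XML below and the `tag` filter are assumptions; the script's real `parser_params` are not shown in this diff):

    import io
    from lxml import etree

    TEI_NS = "{http://www.tei-c.org/ns/1.0}"
    toy = io.BytesIO(
        b'<TEI xmlns="http://www.tei-c.org/ns/1.0">'
        b'<teiHeader><fileDesc/></teiHeader>'
        b'<text><body><p>First paragraph.</p><p>Second.</p><p/></body></text>'
        b'</TEI>'
    )

    for _, tei in etree.iterparse(toy, tag=f"{TEI_NS}TEI"):
        header = tei.find(f"{TEI_NS}teiHeader")
        meta = etree.tostring(header, encoding="utf-8").decode("utf-8")
        text = ' '.join(e.text
                        for e in tei.findall(f".//{TEI_NS}body/{TEI_NS}p")
                        if e.text is not None)
        print(text)   # First paragraph. Second.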