Formats: json
Size: 10M - 100M
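
Mímir Core ships as the Hugging Face loading script below. As a minimal usage sketch (assuming the mimir-project/mimir-core repository id taken from the script's download URL and the segment configurations the script defines):

from datasets import load_dataset

# Load a single quality segment; pass "default" (or no config name) to
# concatenate all three segments. Recent versions of `datasets` require
# trust_remote_code=True for script-based datasets.
dataset = load_dataset(
    "mimir-project/mimir-core",
    "good",
    split="train",
    trust_remote_code=True,
)
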
"""Mímir Core v1 dataset.""" | |
import gzip | |
import json | |
import datasets | |
logger = datasets.logging.get_logger(__name__) | |
_DESCRIPTION = """\\nMímir Core v1.""" | |
_CITATION = """ | |
""" | |
_URL = "https://github.com/NbAiLab/mimir-data" | |
_DATA_URL = "https://huggingface.co/datasets/mimir-project/mimir-core/resolve/main/data/{split_suffix}-{segment}-{index:04d}-of-{n_shards:04d}.json" | |
_N_SHARDS_PER_SPLIT = { | |
"bad": {"train": 6, "validation": 1}, | |
"medium": {"train": 21, "validation": 1}, | |
"good": {"train": 7, "validation": 1}, | |
} | |
_SEGMENTS = ("bad", "medium", "good") | |


class MimirCoreConfig(datasets.BuilderConfig):
    """BuilderConfig for MimirCore."""

    def __init__(self, name=None, *args, **kwargs):
        """BuilderConfig for MimirCore.

        Args:
            name: optional quality segment, one of "bad", "medium" or "good";
                defaults to "default", which loads all segments.
            **kwargs: keyword arguments forwarded to super.
        """
        if name is None:
            name = "default"
        elif name not in _SEGMENTS:
            raise ValueError(f"Invalid segment option '{name}'. Options are {str(_SEGMENTS)}.")
        # super().__init__ stores the validated name on the config.
        super().__init__(
            *args,
            name=name,
            **kwargs,
        )


class MimirCore(datasets.GeneratorBasedBuilder):
    """Mimir Core v1."""

    BUILDER_CONFIGS = [MimirCoreConfig()] + [MimirCoreConfig(segment) for segment in _SEGMENTS]
    BUILDER_CONFIG_CLASS = MimirCoreConfig
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "doc_type": datasets.Value("string"),
                    "publish_year": datasets.Value("int32"),
                    "lang_fasttext": datasets.Value("string"),
                    "lang_fasttext_conf": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "perplexity": datasets.Value("float"),
                    "perplexity_model": datasets.Value("string"),
                    "harmful_pp": datasets.Value("float"),
                    "segment": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The "default" config concatenates all quality segments; a named
        # segment config downloads only its own shards.
        if self.config.name != "default":
            segments = [self.config.name]
        else:
            segments = _SEGMENTS
        data_urls = {}
        for split in ["train", "validation"]:
            data_urls[split] = []
            for segment in segments:
                # Shard indices are 1-based, e.g. train-good-0001-of-0007.json.
                data_urls[split] += [
                    _DATA_URL.format(
                        split_suffix=split,
                        segment=segment,
                        index=index,
                        n_shards=_N_SHARDS_PER_SPLIT[segment][split],
                    )
                    for index in range(1, _N_SHARDS_PER_SPLIT[segment][split] + 1)
                ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yields (key, example) pairs by iterating over all shard files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # Open through the built-in open() first: `datasets` patches it in
            # streaming mode, so wrapping the file object in gzip keeps the
            # script streamable. Each shard is gzip-compressed JSON Lines;
            # blank lines are skipped.
            with open(filepath, "rb") as b, gzip.open(b, "rt", encoding="utf-8") as f:
                for line in f:
                    if line.strip():
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
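
Because _generate_examples opens shards through the built-in open(), the script should also work in streaming mode. A short sketch under the same assumptions as above:

from datasets import load_dataset

# Iterate the validation split of all segments without downloading every
# shard up front; each example records its quality segment in "segment".
stream = load_dataset(
    "mimir-project/mimir-core",
    split="validation",
    streaming=True,
    trust_remote_code=True,
)
for example in stream:
    print(example["id"], example["segment"], example["perplexity"])
    break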