import datasets
from datasets import GeneratorBasedBuilder, BuilderConfig, Sequence, Value

from .parsing import parse_incr
from .train_test_split import train_test_split

|
class LanguageSpecificConfig(BuilderConfig):
    """Builder config that carries the language code, data file location and train fraction."""

    def __init__(
        self,
        language: str,
        data_file: str,
        train_fraction: float = 0.8,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.language = language
        self.data_file = data_file
        self.train_fraction = train_fraction

|
class EnhancedCobaldDataset(GeneratorBasedBuilder):
    """CoBaLD enhanced dataset builder with one config per language."""

    BUILDER_CONFIGS = [
        LanguageSpecificConfig(
            name="en",
            language="en",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldEng/refs/heads/main/enhanced/train.conllu",
            description="English dataset."
        ),
        LanguageSpecificConfig(
            name="ru",
            language="ru",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldRus/refs/heads/main/enhanced/train.conllu",
            description="Russian dataset."
        ),
    ]
|
    def _info(self):
        return datasets.DatasetInfo(
            description="A CoBaLD dataset in CoNLL-U plus format.",
            features=datasets.Features({
                # Token-level columns (one value per token).
                "ids": Sequence(Value("string")),
                "words": Sequence(Value("string")),
                "lemmas": Sequence(Value("string")),
                "upos": Sequence(Value("string")),
                "xpos": Sequence(Value("string")),
                "feats": Sequence(Value("string")),
                "heads": Sequence(Value("int32")),
                "deprels": Sequence(Value("string")),
                "deps": Sequence(Value("string")),
                "miscs": Sequence(Value("string")),
                "deepslots": Sequence(Value("string")),
                "semclasses": Sequence(Value("string")),
                # Sentence-level columns.
                "sent_id": Value("string"),
                "text": Value("string"),
            })
        )
|
    def _split_generators(self, dl_manager):
        data_path = dl_manager.download_and_extract(self.config.data_file)

        # Read all sentences, then split them into train/validation parts using the
        # custom splitter, which is given the names of the tag columns to consider.
        sentences = list(parse_incr(data_path))
        train_sentences, validation_sentences = train_test_split(
            sentences,
            train_fraction=self.config.train_fraction,
            tagsets_names=[
                'upos',
                'xpos',
                'feats',
                'deprels',
                'deps',
                'miscs',
                'deepslots',
                'semclasses'
            ]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"examples": train_sentences}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"examples": validation_sentences}
            )
        ]
|
    def _generate_examples(self, examples: list):
        # Yield (index, example) pairs, as expected by GeneratorBasedBuilder.
        yield from enumerate(examples)
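
# Usage sketch (illustrative only): this script is meant to be loaded through the
# `datasets` library, together with the local parsing.py and train_test_split.py
# modules it imports. The path below is a placeholder, and `trust_remote_code`
# may or may not be required depending on the installed `datasets` version.
#
#     import datasets
#     splits = datasets.load_dataset("path/to/this/script_dir", "en", trust_remote_code=True)
#     print(splits["train"][0]["words"])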