import datasets
from datasets import GeneratorBasedBuilder, BuilderConfig, Sequence, Value

from .parsing import parse_incr
from .train_test_split import train_test_split


class LanguageSpecificConfig(BuilderConfig):
    """BuilderConfig carrying one language's data file URL and split settings."""

    def __init__(
        self,
        language: str,
        data_file: str,
        train_fraction: float = 0.8,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.language = language
        self.data_file = data_file
        self.train_fraction = train_fraction


class EnhancedCobaldDataset(GeneratorBasedBuilder):
    """Builder for the enhanced CoBaLD corpora in CoNLL-U plus format."""

    BUILDER_CONFIGS = [
        LanguageSpecificConfig(
            name="en",
            language="en",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldEng/refs/heads/main/enhanced/train.conllu",
            description="English dataset."
        ),
        LanguageSpecificConfig(
            name="ru",
            language="ru",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldRus/refs/heads/main/enhanced/train.conllu",
            description="Russian dataset."
        ),
        # Further languages can be registered here; see the sketch below.
    ]
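
    # A hypothetical sketch of registering another language: append a config
    # entry mirroring the ones above (the URL is a placeholder, not a real
    # repository):
    #
    #     LanguageSpecificConfig(
    #         name="fr",
    #         language="fr",
    #         data_file="https://raw.githubusercontent.com/.../train.conllu",
    #         description="French dataset.",
    #     ),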

    def _info(self):
        return datasets.DatasetInfo(
            description="A CoBaLD dataset in CoNLL-U plus format.",
            features=datasets.Features({
                "ids": Sequence(Value("string")),
                "words": Sequence(Value("string")),
                "lemmas": Sequence(Value("string")),
                "upos": Sequence(Value("string")),
                "xpos": Sequence(Value("string")),
                # huggingface datasets can't handle dicts with dynamic keys,
                # so feats are represented as strings (e.g. the raw CoNLL-U
                # form "Case=Nom|Number=Sing")
                "feats": Sequence(Value("string")),
                "heads": Sequence(Value("int32")),
                "deprels": Sequence(Value("string")),
                "deps": Sequence(Value("string")),
                "miscs": Sequence(Value("string")),
                "deepslots": Sequence(Value("string")),
                "semclasses": Sequence(Value("string")),
                "sent_id": Value("string"),
                "text": Value("string"),
            })
        )

    def _split_generators(self, dl_manager):
        data_path = dl_manager.download_and_extract(self.config.data_file)
        # Load all sentences into memory, since the train/validation split
        # depends on the full dataset contents.
        sentences = list(parse_incr(data_path))
        train_sentences, validation_sentences = train_test_split(
            sentences,
            train_fraction=self.config.train_fraction,
            tagsets_names=[
                'upos',
                'xpos',
                'feats',
                'deprels',
                'deps',
                'miscs',
                'deepslots',
                'semclasses'
            ]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"examples": train_sentences}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"examples": validation_sentences}
            )
        ]

    def _generate_examples(self, examples: list):
        # Use each sentence's position in the list as the unique example key
        # required by GeneratorBasedBuilder.
        yield from enumerate(examples)
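

# Minimal usage sketch (illustrative, not part of the builder). This module
# uses relative imports, so it is not meant to be executed directly; instead,
# point `load_dataset` at the script from a separate driver, keeping
# `parsing.py` and `train_test_split.py` in the same directory. Recent
# `datasets` versions also require `trust_remote_code=True` for script-based
# builders:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "path/to/enhanced_cobald_dataset.py",
#         name="en",
#         trust_remote_code=True,
#     )
#     print(dataset["train"][0]["words"])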