import json

import datasets
from sklearn.model_selection import train_test_split

# BIO-style tag set for the three entity types (city, name, date).
_DATASET_LABELS = ['O', 'B-CITY', 'I-CITY', 'B-NAMES', 'I-NAMES', 'B-DATE', 'I-DATE']
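
# Assumed record layout for data.jsonl (one JSON object per line); the field
# names 'words' and 'pos' below are taken from _generate_examples:
#   {"words": ["Alice", "flew", "to", "Paris"], "pos": ["B-NAMES", "O", "O", "B-CITY"]}
# ClassLabel also accepts integer indices into _DATASET_LABELS (e.g. 3, 0, 0, 1)
# in place of the label strings.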

class Custom(datasets.GeneratorBasedBuilder):
    """Loading script for a local JSONL token-classification (NER) dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description='',
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_DATASET_LABELS
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage='',
            citation='',
        )

    def _split_generators(self, dl_manager):
        data_path = dl_manager.download_and_extract('data.jsonl')

        # Skip blank lines so json.loads never sees an empty string.
        with open(data_path, 'r', encoding='utf-8') as file:
            lines = [line for line in file if line.strip()]

        # Deterministic 80/20 split so train/validation stay stable across runs.
        train_lines, valid_lines = train_test_split(lines, test_size=0.2, random_state=42)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'lines': train_lines}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'lines': valid_lines}),
        ]

    def _generate_examples(self, lines):
        for guid, line in enumerate(lines):
            data = json.loads(line)
            yield guid, {
                'id': str(guid),
                'tokens': data['words'],
                # 'pos' carries the per-token NER labels in the source JSONL.
                'ner_tags': data['pos'],
            }
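
# Usage sketch (assumption: this script is saved as custom.py next to
# data.jsonl). Recent versions of `datasets` require trust_remote_code=True
# for loading scripts; the guard keeps this from running when the library
# imports the module.
if __name__ == '__main__':
    from datasets import load_dataset

    ds = load_dataset('custom.py', trust_remote_code=True)
    print(ds)              # DatasetDict with 'train' and 'validation' splits
    print(ds['train'][0])  # {'id': '0', 'tokens': [...], 'ner_tags': [...]}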