# Dataset loading script by Bill Noble, adapted from the Kubhist 2 dataset by Simon Hengchen, https://hengchen.net

import json
from pathlib import Path

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """
This is a dataset of text from the Riksdag, Sweden's legislative body.

The original data is available without a license under the Re-use of Public Administration Documents Act (2010:566) at https://data.riksdagen.se/data/dokument

This dataset is derived from a version compiled by Språkbanken Text (SBX) at the University of Gothenburg (Sweden). That version consists of XML files split by document source (motions, questions, protocols, etc.) and includes additional linguistic annotations. It is available under a CC BY 4.0 license at https://spraakbanken.gu.se/resurser/rd

The focus of this Hugging Face dataset is to organise the data for fine-grained diachronic modeling. To that end, it includes two configurations:

# Configurations

## `sentences`

This configuration provides sentences in raw text format with their original whitespace. Sentence-level tokenisation was performed by Språkbanken. 

`datasets.load_dataset('ChangeIsKey/open-riksdag', 'sentences', years=YEARS, sources=SOURCES)`

- `YEARS:list(int)` - years in the range [1961, 2022] from which sentences are drawn
- `SOURCES:list(str)` - the Open Riksdag data is split into different data sources
    - `bet` _Betänkande_ ~ reports
    - `ds` _Departementsserien_ ~ ministry publication series
    - `eun` _EUN_ ~ documents from the EU committee
    - `flista` _Föredragningslistor_ ~ agendas listing the matters to be taken up
    - `fpm` _faktapromemorior_  ~ factual memoranda on EU commission proposals 
    - `frsrdg` _Framställning/redogörelse_ ~ petitions and reports from bodies appointed by the Riksdag
    ...
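
For example, to load raw sentences from motions and chamber protocols for two years (an illustrative call; the years and sources here are arbitrary examples):

```python
from datasets import load_dataset

ds = load_dataset(
    'ChangeIsKey/open-riksdag',
    'sentences',
    years=[2018, 2019],       # any subset of 1961-2022
    sources=['mot', 'prot'],  # motions and chamber protocols
)
```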

### Data fields

- `sentence` - the sentence text, with its original whitespace
- `doc_type` - the source type of the document the sentence comes from (one of the sources above)
- `doc_id` - the identifier of the source document
- `date` - the document date
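
Each underlying file is bzip2-compressed JSONL with one record per sentence. For illustration only (the values below are invented placeholders, not real data), a `sentences` record looks like:

```json
{"sentence": "Herr talman! ...", "doc_type": "mot", "doc_id": "GZ02A321", "date": "2011-10-05"}
```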


## `target-103`

This configuration provides the subset of sentences that contain at least one of 103 target lemmas, with these additional fields:

- `lemma` - the target lemma found in the sentence
- `start` - the start offset of the target within the sentence
- `end` - the end offset of the target within the sentence
- `pos` - the part-of-speech tag of the target

In a nutshell, this configuration offers:

- all sentences including one or more of 103 target words, which were chosen by TF-IDF (described below)
- per-month subsets (with all document types combined)
- one line per sentence (sentences shorter than 4 words were discarded)
- the fields of the `sentences` configuration plus the target fields listed above; see the example below
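
Loading this configuration looks the same, with an extra `targets` argument (again an illustrative call; targets may be given as lemma strings or as integer indices):

```python
from datasets import load_dataset

ds = load_dataset(
    'ChangeIsKey/open-riksdag',
    'target-103',
    years=[2015],
    sources=['mot'],
    targets=['skatt', 'miljö'],  # any subset of the 103 target lemmas
)
```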

The dataset is released under a CC BY 4.0 license.
"""

_ALL_YEARS = list(range(1961, 2023))
_ALL_SOURCES = ['bet', 'ds', 'eun', 'flista', 'fpm', 'frsrdg', 'ip', 'kammakt', 'kom', 'mot', 'ovr', 'prop', 'prot', 'rskr', 'samtr', 'skfr', 'sou', 'tlista', 'utr', 'utsk', 'yttr']
_ALL_TARGET_TERMS = ['%', 'april', 'arbetsförmedling', 'arbetsgivare', 'arbetslöshet', 'arbetsmarknad', 'arbetsmarknadsminister', 'augusti', 'barn', 'betala', 'bil', 'bolag', 'bostad', 'brott', 'december', 'drabba', 'ekonomisk', 'elev', 'februari', 'finansminister', 'flicka', 'flygplats', 'forskning', 'fru', 'företag', 'försvarsmakt', 'försvarsminister', 'försäkringskassa', 'förälder', 'gammal', 'grupp', 'herr', 'hälsa', 'högskola', 'internationell', 'isolering', 'januari', 'jobb', 'juli', 'juni', 'justitieminister', 'kommun', 'kommunal', 'kostnad', 'krona', 'kultur', 'kunskap', 'kvinna', 'lag', 'lagstiftning', 'landsbygd', 'landsting', 'lokal', 'län', 'lärare', 'm', 'maj', 'man', 'mars', 'migrationsminister', 'miljard', 'miljon', 'miljö', 'miljöminister', 'myndighet', 'mänsklig', 'mål', 'nationell', 'ni', 'november', 'näringsminister', 'offentlig', 'oktober', 'organisation', 'ovanstående', 'person', 'polis', 'procent', 'rapport', 'regel', 'region', 'rättighet', 'september', 'sjukvård', 'skatt', 'socialminister', 'stat', 'statlig', 'statsminister', 'statsråd', 'student', 'stöd', 'trafikverk', 'ung', 'ungdom', 'utbildning', 'utbildningsminister', 'utredning', 'utrikesminister', 'verksamhet', 'våld', 'vård', 'återtagen']
_TERM_TO_ID = {t: i for i,t in enumerate(_ALL_TARGET_TERMS)}

class OpenRiksdagConfig(datasets.BuilderConfig):
    """BuilderConfig for openRD-103."""

    def __init__(self, name='sentences', years=_ALL_YEARS, sources=_ALL_SOURCES, targets=_ALL_TARGET_TERMS, **kwargs):
        """Constructs an open-riksdag dataset.
        Args:
        year: integer year between 1979 and 2019
        **kwargs: keyword arguments forwarded to super.
        """

        if not all(year in _ALL_YEARS for year in years):
            raise ValueError("`years` should contain integers between 1961 and 2022")
        self.years = list(set(years))

        if not all(source in _ALL_SOURCES for source in sources):
            raise ValueError(f"`sources` should be a subset of {_ALL_SOURCES}")
        self.sources = list(set(sources))

        try:
            # Targets may be given as lemma strings or as integer indices into _ALL_TARGET_TERMS.
            if targets and isinstance(targets[0], str):
                targets = [_TERM_TO_ID[t] for t in targets]
            assert all(t in _TERM_TO_ID.values() for t in targets)
        except (KeyError, AssertionError) as e:
            raise ValueError(f"`targets` should be a subset of {_ALL_TARGET_TERMS} or integer indices thereof") from e
        self.targets = list(set(targets))
    
        # Default data_dir to ./data unless the caller supplied one (avoids a
        # duplicate-keyword error when data_dir is passed in kwargs).
        kwargs.setdefault('data_dir', './data')
        super().__init__(
            name=name,
            version=datasets.Version("1.1.0", ""),
            **kwargs
        )

class OpenRiksdag(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = OpenRiksdagConfig

    BUILDER_CONFIGS = [
        OpenRiksdagConfig(
            name='sentences',
            description="Raws sentences from Riksdagens öppnadata",
        ),
        OpenRiksdagConfig(
            name='target-103',
            description="Sentences from Riksdagens öppna data with a selection of 103 target words"
        )
    ]

    def _info(self):

        features = {
            "sentence": datasets.Value("string"), 
            "doc_type": datasets.Value("string"), 
            "doc_id": datasets.Value("string"), 
            "date": datasets.Value("timestamp[s]")
         }

        if self.config.name == 'target-103':
            target_features = {
                "lemma": datasets.Value("string"), 
                "start": datasets.Value("int32"), 
                "end": datasets.Value("int32"), 
                "pos": datasets.Value("string")
            }
            features = {**features, **target_features}

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://github.com/ChangeIsKey",
        )

    def _split_generators(self, dl_manager):

        data_dir = Path(self.config.data_dir) / self.config.name
        # Enumerate every file that could exist for the requested years and
        # sources (and, for target-103, targets).
        if self.config.name == 'sentences':
            possible_files = [data_dir / f"{y}_{s}.jsonl.bz2"
                              for y in self.config.years for s in self.config.sources]
        elif self.config.name == 'target-103':
            possible_files = [data_dir / f"{t:03d}/{y}_target{t:03d}_{s}.jsonl.bz2"
                              for y in self.config.years
                              for t in self.config.targets
                              for s in self.config.sources]
        # Not every (year, source) combination exists, so skip missing files.
        extracted_paths = []
        for f in possible_files:
            try:
                extracted_paths.append(dl_manager.download_and_extract(str(f)))
            except FileNotFoundError:
                logger.debug("skipping missing file %s", f)
                continue
        return [datasets.SplitGenerator(
            name=datasets.Split.TRAIN, 
            gen_kwargs={"filepaths": extracted_paths}
            )
        ]

    def _generate_examples(self, filepaths):
        """Yields examples, one per line of the extracted JSONL files."""
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding='utf-8') as f:
                for line in f:
                    item = json.loads(line)
                    yield key, item
                    key += 1