# Config file by Bill Noble, adapted from the Kubhist 2 dataset by Simon Hengchen, https://hengchen.net
import os
import datasets
import json
from datasets.data_files import DataFilesDict
from pathlib import Path
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """
This is a dataset of text from the Riksdag, Sweden's legislative body.
The original data is available without a license under the Re-use of Public Administration Documents Act (2010:566) at https://data.riksdagen.se/data/dokument
This dataset is derived from a version compiled by Språkbanken Text (SBX) at the University of Gothenburg (Sweden). That version consists of XML files split by document source (motions, questions, protocols, etc.) and includes additional linguistic annotations. It is available under a CC BY 4.0 license at https://spraakbanken.gu.se/resurser/rd
The focus of this Hugging Face dataset is to organise the data for fine-grained diachronic modeling. To that end, it includes two configurations:
# Configurations
## `sentences`
This configuration provides sentences in raw text format with their original whitespace. Sentence-level tokenisation was performed by Språkbanken.
`datasets.load_dataset('ChangeIsKey/open-riksdag', 'sentences', years=YEARS, sources=SOURCES)`
- `YEARS:list(int)` - years in the range [1961, 2022] from which sentences are drawn
- `SOURCES:list(str)` - document source codes; the Open Riksdag data is split into the following sources:
- `bet` _Betänkande_ ~ reports
- `ds`
- `eun` _EUN_ ~ documents from the EU committee
- `flista` _Föredragningslistor_ ~ Lists of speeches
- `fpm` _faktapromemorior_ ~ factual memoranda on EU commission proposals
- `frsrdg` _Framställning/redogörelse_ ~ petitions and reports from bodies appointed by the Riksdag
...
Data fields:
- `sentence` - the raw sentence text
- `date` - the date of the source document
- `doc_type` - the document source code (see `SOURCES` above)
- `doc_id` - the identifier of the source document
...
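For example, the following sketch loads raw sentences from motions and parliamentary protocols for two years (the year and source choices are illustrative, and data may not exist for every year/source combination):
```python
from datasets import load_dataset

# Illustrative subset: motions ('mot') and protocols ('prot') from 2018-2019.
ds = load_dataset('ChangeIsKey/open-riksdag', 'sentences',
                  years=[2018, 2019], sources=['mot', 'prot'])

for example in ds['train'].select(range(3)):
    print(example['date'], example['sentence'])
```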
## `target-103`
In addition to the fields above, this configuration includes:
- `lemma` - the target lemma
- `start` - the start position of the target word within the sentence
- `end` - the end position of the target word within the sentence
- `pos` - the part of speech of the target word
In a nutshell, this configuration offers:
- all sentences containing one or more of 103 target words, which were chosen by TF-IDF
- per-month subsets (with all document types combined)
- one line per sentence (sentences shorter than 4 words were discarded)
- data includes: date, source, document_id, target_word, and text.
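A usage sketch for this configuration (the target lemmas and year are illustrative; `targets` also accepts integer indexes, and `start`/`end` are assumed here to be character offsets into the sentence):
```python
from datasets import load_dataset

# Illustrative subset: two target lemmas from 2015.
ds = load_dataset('ChangeIsKey/open-riksdag', 'target-103',
                  years=[2015], targets=['arbetsmarknad', 'miljö'])

for example in ds['train'].select(range(3)):
    # Assuming `start`/`end` are character offsets into `sentence`.
    surface_form = example['sentence'][example['start']:example['end']]
    print(example['lemma'], surface_form)
```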
License is CC BY 4.0 with attribution.
"""
_ALL_YEARS = list(range(1961, 2023))
_ALL_SOURCES = ['bet', 'ds', 'eun', 'flista', 'fpm', 'frsrdg', 'ip', 'kammakt', 'kom', 'mot', 'ovr', 'prop', 'prot', 'rskr', 'samtr', 'skfr', 'sou', 'tlista', 'utr', 'utsk', 'yttr']
_ALL_TARGET_TERMS = ['%', 'april', 'arbetsförmedling', 'arbetsgivare', 'arbetslöshet', 'arbetsmarknad', 'arbetsmarknadsminister', 'augusti', 'barn', 'betala', 'bil', 'bolag', 'bostad', 'brott', 'december', 'drabba', 'ekonomisk', 'elev', 'februari', 'finansminister', 'flicka', 'flygplats', 'forskning', 'fru', 'företag', 'försvarsmakt', 'försvarsminister', 'försäkringskassa', 'förälder', 'gammal', 'grupp', 'herr', 'hälsa', 'högskola', 'internationell', 'isolering', 'januari', 'jobb', 'juli', 'juni', 'justitieminister', 'kommun', 'kommunal', 'kostnad', 'krona', 'kultur', 'kunskap', 'kvinna', 'lag', 'lagstiftning', 'landsbygd', 'landsting', 'lokal', 'län', 'lärare', 'm', 'maj', 'man', 'mars', 'migrationsminister', 'miljard', 'miljon', 'miljö', 'miljöminister', 'myndighet', 'mänsklig', 'mål', 'nationell', 'ni', 'november', 'näringsminister', 'offentlig', 'oktober', 'organisation', 'ovanstående', 'person', 'polis', 'procent', 'rapport', 'regel', 'region', 'rättighet', 'september', 'sjukvård', 'skatt', 'socialminister', 'stat', 'statlig', 'statsminister', 'statsråd', 'student', 'stöd', 'trafikverk', 'ung', 'ungdom', 'utbildning', 'utbildningsminister', 'utredning', 'utrikesminister', 'verksamhet', 'våld', 'vård', 'återtagen']
_TERM_TO_ID = {t: i for i,t in enumerate(_ALL_TARGET_TERMS)}
class OpenRiksdagConfig(datasets.BuilderConfig):
"""BuilderConfig for openRD-103."""
def __init__(self, name='sentences', years=_ALL_YEARS, sources=_ALL_SOURCES, targets=_ALL_TARGET_TERMS, **kwargs):
"""Constructs an open-riksdag dataset.
Args:
year: integer year between 1979 and 2019
**kwargs: keyword arguments forwarded to super.
"""
if not all(year in _ALL_YEARS for year in years):
raise ValueError("`years` should contain integers between 1979 and 2019")
self.years = list(set(years))
if not all(year in _ALL_YEARS for year in years):
raise ValueError(f"`sources` should be a subset of {_ALL_SOURCES}")
self.sources = list(set(sources))
try:
if targets and isinstance(targets[0], str):
targets = [_TERM_TO_ID[t] for t in targets]
assert all(t in _TERM_TO_ID.values() for t in targets)
targets = list(set(targets))
except (KeyError, AssertionError) as e:
print(e)
raise ValueError(f"`targets` should be a subset of {_ALL_TARGET_TERMS} or integer indexes there of")
self.targets = list(set(targets))
super().__init__(
name = name,
version = datasets.Version("1.1.0", ""),
data_dir = kwargs.get('data_dir', "./data") ,
**kwargs
)
class OpenRiksdag(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = OpenRiksdagConfig
    BUILDER_CONFIGS = [
        OpenRiksdagConfig(
            name='sentences',
            description="Raw sentences from Riksdagens öppna data",
        ),
        OpenRiksdagConfig(
            name='target-103',
            description="Sentences from Riksdagens öppna data with a selection of 103 target words"
        )
    ]
    def _info(self):
        features = {
            "sentence": datasets.Value("string"),
            "doc_type": datasets.Value("string"),
            "doc_id": datasets.Value("string"),
            "date": datasets.Value("timestamp[s]")
        }
        if self.config.name == 'target-103':
            target_features = {
                "lemma": datasets.Value("string"),
                "start": datasets.Value("int32"),
                "end": datasets.Value("int32"),
                "pos": datasets.Value("string")
            }
            features = {**features, **target_features}
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://github.com/ChangeIsKey",
        )
    def _split_generators(self, dl_manager):
        data_dir = Path(self.config.data_dir) / self.config.name
        if self.config.name == 'sentences':
            possible_files = [data_dir/f"{y}_{s}.jsonl.bz2" for y in self.config.years for s in self.config.sources]
        elif self.config.name == 'target-103':
            possible_files = [data_dir/f"{t:03d}/{y}_target{t:03d}_{s}.jsonl.bz2" for y in self.config.years
                              for t in self.config.targets for s in self.config.sources]
        extracted_paths = []
        for f in possible_files:
            try:
                extracted_paths.append(dl_manager.download_and_extract(f))
            except FileNotFoundError:
                # Not every year/source (or year/target/source) combination exists.
                logger.info(f"File not found, skipping: {f}")
                continue
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": extracted_paths}
            )
        ]
    def _generate_examples(self, filepaths):
        """Yields examples."""
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding='utf-8') as f:
                for line in f:
                    item = json.loads(line)
                    yield key, item
                    key += 1
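# A minimal local smoke test (a sketch, not part of the loading script proper):
# it assumes the compressed data files are laid out under ./data/<config-name>/
# exactly as `_split_generators` expects, and that the chosen year/source exists.
if __name__ == "__main__":
    builder = OpenRiksdag(config_name="sentences", years=[2018], sources=["prot"])
    builder.download_and_prepare()
    dataset = builder.as_dataset(split="train")
    print(dataset[0])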