"""X-stance dataset for German and French/Italian stance detection"""

import datasets

_CITATION = """\
@inproceedings{vamvas2020xstance,
author = "Vamvas, Jannis and Sennrich, Rico",
title = "{X-Stance}: A Multilingual Multi-Target Dataset for Stance Detection",
booktitle = "Proceedings of the 5th Swiss Text Analytics Conference (SwissText) \& 16th Conference on Natural Language Processing (KONVENS)",
address = "Zurich, Switzerland",
year = "2020",
month = "jun",
url = "http://ceur-ws.org/Vol-2624/paper9.pdf"
}
"""
_DESCRIPTION = """\
The x-stance dataset contains more than 150 political questions, and 67k comments written by candidates on those questions. The comments are partly German, partly French and Italian. The data have been extracted from the Swiss voting advice platform Smartvote.
"""
_HOMEPAGE = "https://github.com/Smu-Tan/ZS-NMT-Variations/tree/main"
_LICENSE = "cc-by-4.0"

_langs = [
    'af', 'am', 'ar', 'ast', 'be', 'bg', 'bn', 'bs', 'ca', 'cs',
    'da', 'de', 'es', 'fr', 'gu', 'ha', 'he', 'hi', 'is', 'it',
    'kab', 'kn', 'lb', 'mr', 'mt', 'ne', 'nl', 'no', 'oc', 'pl',
    'pt', 'ro', 'ru', 'sd', 'so', 'sr', 'sv', 'ti', 'uk', 'ur',
]
_En_centric_Pairs = ["en-" + lang for lang in _langs]
_ZS_Pairs = [f"{src}-{tgt}" for src in _langs for tgt in _langs if src != tgt]
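
# For illustration, the comprehensions above produce config names such as:
#   _En_centric_Pairs -> ["en-af", "en-am", ..., "en-ur"]   (40 supervised directions)
#   _ZS_Pairs         -> ["af-am", "af-ar", ..., "ur-uk"]   (40 * 39 = 1560 zero-shot directions)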


class EC40Config(datasets.BuilderConfig):
    def __init__(self, language_pair, **kwargs):
        """
        Args:
            language_pair: the language pair to load, e.g. "en-de".
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language_pair = language_pair


class EC40(datasets.GeneratorBasedBuilder):
    """EC40 is English-centric, meaning that all training pairs include English on either the source or target side."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = EC40Config
    BUILDER_CONFIGS = [
        EC40Config(name=pair, description=None, language_pair=pair)
        for pair in _En_centric_Pairs + _ZS_Pairs
    ]
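
    # A minimal usage sketch (assumes this script is saved locally as "ec40.py"
    # next to the data folders; the path and the "en-de" pair are illustrative only):
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("ec40.py", "en-de", trust_remote_code=True)
    #   print(ds["train"][0]["translation"])   # -> {"en": "...", "de": "..."}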

    def _info(self):
        src_tag, tgt_tag = self.config.language_pair.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}
            ),
            supervised_keys=(src_tag, tgt_tag),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair
        src_tag, tgt_tag = lang_pair.split("-")
        train_src = dl_manager.download_and_extract(f"EC40-train-set/{lang_pair}.{src_tag}")
        train_tgt = dl_manager.download_and_extract(f"EC40-train-set/{lang_pair}.{tgt_tag}")
        valid_src = dl_manager.download_and_extract(f"Ntrex-eval-set/test.{lang_pair}.{src_tag}")
        valid_tgt = dl_manager.download_and_extract(f"Ntrex-eval-set/test.{lang_pair}.{tgt_tag}")
        test_src = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/test.{lang_pair}.{src_tag}")
        test_tgt = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/test.{lang_pair}.{tgt_tag}")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_src, "labelpath": train_tgt, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_src, "labelpath": valid_tgt, "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_src, "labelpath": test_tgt, "split": "test"}),
        ]
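
    # Expected on-disk layout, inferred from the relative paths above (the exact
    # hosting location is an assumption, not specified in this script), shown for
    # the illustrative pair "en-de":
    #   EC40-train-set/en-de.en                              training source lines
    #   EC40-train-set/en-de.de                              training target lines
    #   Ntrex-eval-set/test.en-de.{en,de}                    validation lines
    #   Flores200-test-set/en-test-set/test.en-de.{en,de}    test lines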

    def _generate_examples(self, filepath, labelpath, split):
        """Yields parallel sentence pairs, one example per aligned line."""
        src_tag, tgt_tag = self.config.language_pair.split("-")
        # The files are opened in text mode, so read() already returns decoded str.
        with open(filepath, encoding="utf-8") as f:
            src = f.read().split("\n")
        with open(labelpath, encoding="utf-8") as f:
            tgt = f.read().split("\n")
        for idx, (s, t) in enumerate(zip(src, tgt)):
            yield idx, {"translation": {src_tag: s, tgt_tag: t}}