ArneBinder committed on commit 196184d (1 parent: 943d45f)

Create argmicro.py

Files changed (1): argmicro.py (+196, -0)
argmicro.py ADDED
@@ -0,0 +1,196 @@
"""The Argumentative Microtext Corpus for German and English Argumentation Mining."""
import glob
import logging
import os
from os.path import abspath, isdir
from pathlib import Path
from xml.etree import ElementTree

import datasets

_CITATION = """\
@inproceedings{peldszus2015annotated,
  title={An annotated corpus of argumentative microtexts},
  author={Peldszus, Andreas and Stede, Manfred},
  booktitle={Argumentation and Reasoned Action: Proceedings of the 1st European Conference on Argumentation, Lisbon},
  volume={2},
  pages={801--815},
  year={2015}
}
"""

_DESCRIPTION = "The Argumentative Microtext Corpus for German and English Argumentation Mining."

_HOMEPAGE = "http://angcl.ling.uni-potsdam.de/resources/argmicro.html"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License, see https://creativecommons.org/licenses/by-nc-sa/4.0/"


# The HuggingFace datasets library doesn't host the datasets; it only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URL = "https://github.com/peldszus/arg-microtexts/archive/refs/heads/master.zip"

_VERSION = datasets.Version("1.0.0")

_STANCE_CLASS_LABELS = ["con", "pro", "unclear", "UNDEFINED"]
_ADU_CLASS_LABELS = ["opp", "pro"]
_EDGE_CLASS_LABELS = ["seg", "sup", "exa", "add", "reb", "und"]
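# Label semantics, paraphrased from the corpus' arggraph.dtd: ADU types mark the
# proponent ("pro") or opponent ("opp") role; edge types are "seg" (segment-to-ADU
# link), "sup" (support), "exa" (support by example), "add" (additional/linked
# premise), "reb" (rebutting attack), and "und" (undercutting attack).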


logger = logging.getLogger(__name__)


class ArgMicro(datasets.GeneratorBasedBuilder):
    """ArgMicro is an argumentation mining dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="en"),
        datasets.BuilderConfig(name="de"),
    ]
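    # Each configuration selects one language subdirectory of the corpus ("en" or
    # "de"); see `_split_generators`, which appends `self.config.name` to the path.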

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "topic_id": datasets.Value("string"),
                "stance": datasets.ClassLabel(names=_STANCE_CLASS_LABELS),
                "text": datasets.Value("string"),
                "edus": datasets.Sequence(
                    {
                        "id": datasets.Value("string"),
                        "start": datasets.Value("int32"),
                        "end": datasets.Value("int32"),
                    }
                ),
                "adus": datasets.Sequence(
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.ClassLabel(names=_ADU_CLASS_LABELS),
                    }
                ),
                "edges": datasets.Sequence(
                    {
                        "id": datasets.Value("string"),
                        "src": datasets.Value("string"),
                        "trg": datasets.Value("string"),
                        "type": datasets.ClassLabel(names=_EDGE_CLASS_LABELS),
                    }
                ),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # defined above; both language configurations share the same features
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # If several configurations are possible (listed in BUILDER_CONFIGS), the
        # configuration selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with each URL
        # replaced by the path to the local file. By default, archives are extracted and the path to the
        # cached folder where they were extracted is returned instead of the archive itself.

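        # Note: `dl_manager.manual_dir` mirrors the `data_dir` argument of `load_dataset`;
        # here it may point either to an already-extracted checkout of the arg-microtexts
        # repository or to the downloaded zip archive.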
        if dl_manager.manual_dir is not None:
            base_path = abspath(dl_manager.manual_dir)
            if not isdir(base_path):
                base_path = os.path.join(dl_manager.extract(base_path), "arg-microtexts-master")
        else:
            # TODO: test this!
            base_path = os.path.join(
                dl_manager.download_and_extract(_URL), "arg-microtexts-master"
            )
        base_path = Path(base_path) / "corpus"

        try:
            from lxml import etree

            dtd = etree.DTD(base_path / "arggraph.dtd")
        except ModuleNotFoundError:
            logger.warning("lxml not installed. Skipping DTD validation.")
            dtd = None
            etree = None

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": base_path / self.config.name, "dtd": dtd, "etree": etree},
            ),
        ]

    def _generate_examples(self, path, dtd=None, etree=None):
        """Yields examples."""
        # This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        # The key is not important; it's kept mostly for legacy reasons (legacy from tfds).

        _id = 0
        text_file_names = sorted(glob.glob(f"{path}/*.txt"))
        if len(text_file_names) == 0:
            raise Exception(f"No text files found in {path}. Did you set the correct data_dir?")
        invalid_files = []
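        # Annotation files that fail DTD validation are collected here and reported
        # together at the end, after all valid examples have been yielded.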
        for text_file_name in text_file_names:
            txt_fn = Path(text_file_name)
            ann_fn = txt_fn.with_suffix(".xml")
            with open(txt_fn, encoding="utf-8") as f:
                text = f.read()

            # validate the xml file, if a dtd is available
            if dtd is not None and etree is not None:
                e = etree.parse(ann_fn)
                v = dtd.validate(e)
                if not v:
                    logger.error(f"{ann_fn} is INVALID:")
                    logger.error(dtd.error_log.filter_from_errors()[0])
                    invalid_files.append(ann_fn)
                    continue

            annotations = ElementTree.parse(ann_fn).getroot()
            edus = []
            start_pos = 0
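            # Recover each EDU's character span by searching for its text in the raw
            # document, scanning forward from the end of the previous EDU so that
            # repeated strings resolve to the correct occurrence.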
            for edu in annotations.findall("edu"):
                start = text.find(edu.text, start_pos)
                if start == -1:
                    raise Exception(f"Cannot find {edu.text} in {text}")
                end = start + len(edu.text)
                edus.append({"id": edu.attrib["id"], "start": start, "end": end})
                start_pos = end
            adus = [
                {"id": adu.attrib["id"], "type": adu.attrib["type"]}
                for adu in annotations.findall("adu")
            ]
            edges = [
                {
                    "id": edge.attrib["id"],
                    "src": edge.attrib["src"],
                    "trg": edge.attrib["trg"],
                    "type": edge.attrib["type"],
                }
                for edge in annotations.findall("edge")
            ]
            yield _id, {
                "id": annotations.attrib["id"],
                "topic_id": annotations.attrib.get("topic_id", "UNDEFINED"),
                "stance": annotations.attrib.get("stance", "UNDEFINED"),
                "text": text,
                "edus": edus,
                "adus": adus,
                "edges": edges,
            }
            _id += 1

        if len(invalid_files) > 0:
            raise Exception(f"Found {len(invalid_files)} invalid files: {invalid_files}")
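
A minimal usage sketch (assuming the script above is saved locally as argmicro.py; depending on your datasets version, loading a local script may additionally require trust_remote_code=True):

    import datasets

    ds = datasets.load_dataset("argmicro.py", name="en", split="train")

    example = ds[0]
    # A `datasets.Sequence` of dicts is exposed as a dict of lists, so the span
    # of the first EDU is recovered like this:
    start = example["edus"]["start"][0]
    end = example["edus"]["end"][0]
    print(example["text"][start:end])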