Datasets · Modalities: Text · Languages: English · Size: < 1K · Libraries: Datasets
gabrielaltay committed · Commit 4b4f965 · 1 Parent(s): 516bfe9

upload hubscripts/euadr_hub.py to hub from bigbio repo

Files changed (1)
  1. euadr.py +317 -0
euadr.py ADDED
@@ -0,0 +1,317 @@
import os

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@article{VANMULLIGEN2012879,
    title = {The EU-ADR corpus: Annotated drugs, diseases, targets, and their relationships},
    journal = {Journal of Biomedical Informatics},
    volume = {45},
    number = {5},
    pages = {879-884},
    year = {2012},
    note = {Text Mining and Natural Language Processing in Pharmacogenomics},
    issn = {1532-0464},
    doi = {https://doi.org/10.1016/j.jbi.2012.04.004},
    url = {https://www.sciencedirect.com/science/article/pii/S1532046412000573},
    author = {Erik M. {van Mulligen} and Annie Fourrier-Reglat and David Gurwitz and Mariam Molokhia and Ainhoa Nieto and Gianluca Trifiro and Jan A. Kors and Laura I. Furlong},
    keywords = {Text mining, Corpus development, Machine learning, Adverse drug reactions},
    abstract = {Corpora with specific entities and relationships annotated are essential to train and evaluate text-mining systems that are developed to extract specific structured information from a large corpus. In this paper we describe an approach where a named-entity recognition system produces a first annotation and annotators revise this annotation using a web-based interface. The agreement figures achieved show that the inter-annotator agreement is much better than the agreement with the system provided annotations. The corpus has been annotated for drugs, disorders, genes and their inter-relationships. For each of the drug–disorder, drug–target, and target–disorder relations three experts have annotated a set of 100 abstracts. These annotated relationships will be used to train and evaluate text-mining software to capture these relationships in texts.}
}
"""

_DATASETNAME = "euadr"
_DISPLAYNAME = "EU-ADR"

_DESCRIPTION = """\
Corpora with specific entities and relationships annotated are essential to \
train and evaluate text-mining systems that are developed to extract specific \
structured information from a large corpus. In this paper we describe an \
approach where a named-entity recognition system produces a first annotation and \
annotators revise this annotation using a web-based interface. The agreement \
figures achieved show that the inter-annotator agreement is much better than the \
agreement with the system provided annotations. The corpus has been annotated \
for drugs, disorders, genes and their inter-relationships. For each of the \
drug-disorder, drug-target, and target-disorder relations three experts \
have annotated a set of 100 abstracts. These annotated relationships will be \
used to train and evaluate text-mining software to capture these relationships \
in texts.
"""

_HOMEPAGE = "https://www.sciencedirect.com/science/article/pii/S1532046412000573"

_LICENSE = 'License information unavailable'

_URL = "https://biosemantics.erasmusmc.nl/downloads/euadr.tgz"

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]


class EUADR(datasets.GeneratorBasedBuilder):
    """EU-ADR corpus: PubMed abstracts annotated with drugs, disorders, genes
    and their inter-relationships, in a source and a BigBio KB schema."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    DEFAULT_CONFIG_NAME = "euadr_bigbio_kb"

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="euadr_source",
            version=SOURCE_VERSION,
            description="EU-ADR source schema",
            schema="source",
            subset_id="euadr",
        ),
        BigBioConfig(
            name="euadr_bigbio_kb",
            version=BIGBIO_VERSION,
            description="EU-ADR simplified BigBio schema for named entity recognition and relation extraction",
            schema="bigbio_kb",
            subset_id="euadr",
        ),
    ]

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pmid": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "annotations": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URL
        datapath = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": datapath, "dl_manager": dl_manager},
            ),
        ]
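
    # NOTE (annotation layout, inferred from the parsing below rather than
    # from official EU-ADR documentation): each <pmid>.txt file in
    # euadr_corpus/ holds tab-separated rows. Column 2 flags the row type
    # ("relation" or "concept"). Concept rows carry start/end character
    # offsets in columns 4 and 5 and the entity type in the last column;
    # relation rows carry "start:end" spans for their two arguments in
    # columns 7 and 8 and the relation type in the last column.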

    def _generate_examples(self, datapath, dl_manager):
        def replace_html_special_chars(string):
            # since we are getting the text as an HTML file, we need to replace
            # special characters
            for (i, r) in [
                ("&#34;", '"'),
                ("&quot;", '"'),
                ("&#39;", "'"),
                ("&apos;", "'"),
                ("&#38;", "&"),
                ("&amp;", "&"),
                ("&#60;", "<"),
                ("&lt;", "<"),
                ("&#62;", ">"),
                ("&gt;", ">"),
                ("&#x27;", "'"),
            ]:
                string = string.replace(i, r)
            return string

        def suppr_blank(l_str):
            # keep only the non-empty strings left over after splitting
            r = []
            for string in l_str:
                if len(string) > 0:
                    r.append(string)
            return r

        folder_path = os.path.join(datapath, "euadr_corpus")
        key = 0
        if self.config.schema == "source":
            for filename in os.listdir(folder_path):
                if "_" not in filename:
                    # fetch the MEDLINE-format record for this PMID from PubMed
                    corpus_path = dl_manager.download_and_extract(
                        f"https://pubmed.ncbi.nlm.nih.gov/{filename[:-4]}/?format=pubmed"
                    )
                    with open(corpus_path, "r", encoding="latin") as f:
                        full_html = replace_html_special_chars(
                            ("".join(f.readlines()))
                            .replace("\r\n", "")
                            .replace("\n", "")
                        )
                    # the abstract sits between the "AB -" and "FAU -" tags,
                    # the title between "TI -" and "PG"
                    abstract = " ".join(
                        suppr_blank(
                            full_html.split("AB -")[-1]
                            .split("FAU -")[0]
                            .split(" ")
                        )
                    )
                    title = " ".join(
                        suppr_blank(
                            full_html.split("TI -")[-1].split("PG")[0].split(" ")
                        )
                    )
                    full_text = " ".join([title, abstract])
                    with open(
                        os.path.join(folder_path, filename), "r", encoding="latin"
                    ) as f:
                        lines = f.readlines()
                    yield key, {
                        "pmid": filename[:-4],
                        "title": title,
                        "abstract": abstract,
                        "annotations": lines,
                    }
                    key += 1
        elif self.config.schema == "bigbio_kb":
            for filename in os.listdir(folder_path):
                if "_" not in filename:
                    corpus_path = dl_manager.download_and_extract(
                        f"https://pubmed.ncbi.nlm.nih.gov/{filename[:-4]}/?format=pubmed"
                    )
                    with open(corpus_path, "r", encoding="latin") as f:
                        full_html = replace_html_special_chars(
                            ("".join(f.readlines()))
                            .replace("\r\n", "")
                            .replace("\n", "")
                        )
                    abstract = " ".join(
                        suppr_blank(
                            full_html.split("AB -")[-1]
                            .split("FAU -")[0]
                            .split(" ")
                        )
                    )
                    title = " ".join(
                        suppr_blank(
                            full_html.split("TI -")[-1].split("PG")[0].split(" ")
                        )
                    )
                    full_text = " ".join([title, abstract])
                    with open(
                        os.path.join(folder_path, filename), "r", encoding="latin"
                    ) as f:
                        lines = f.readlines()
                    data = {
                        "id": str(key),
                        "document_id": str(key),
                        "passages": [],
                        "entities": [],
                        "events": [],
                        "coreferences": [],
                        "relations": [],
                    }
                    key += 1
                    data["passages"].append(
                        {
                            "id": str(key),
                            "type": "title",
                            "text": [title],
                            "offsets": [[0, len(title)]],
                        }
                    )
                    key += 1
                    # the abstract starts after the title plus the single
                    # space joining them in full_text
                    data["passages"].append(
                        {
                            "id": str(key),
                            "type": "abstract",
                            "text": [abstract],
                            "offsets": [
                                [len(title) + 1, len(title) + 1 + len(abstract)]
                            ],
                        }
                    )
                    key += 1
                    for line in lines:
                        line_processed = line.split("\t")
                        if line_processed[2] == "relation":
                            data["entities"].append(
                                {
                                    "id": str(key),
                                    "offsets": [
                                        [
                                            int(line_processed[7].split(":")[0]),
                                            int(line_processed[7].split(":")[1]),
                                        ]
                                    ],
                                    "text": [
                                        full_text[
                                            int(
                                                line_processed[7].split(":")[0]
                                            ) : int(line_processed[7].split(":")[1])
                                        ]
                                    ],
                                    "type": "",
                                    "normalized": [],
                                }
                            )
                            key += 1
                            data["entities"].append(
                                {
                                    "id": str(key),
                                    "offsets": [
                                        [
                                            int(line_processed[8].split(":")[0]),
                                            int(line_processed[8].split(":")[1]),
                                        ]
                                    ],
                                    "text": [
                                        full_text[
                                            int(
                                                line_processed[8].split(":")[0]
                                            ) : int(line_processed[8].split(":")[1])
                                        ]
                                    ],
                                    "type": "",
                                    "normalized": [],
                                }
                            )
                            key += 1
                            # the relation links the two entities appended above
                            data["relations"].append(
                                {
                                    "id": str(key),
                                    "type": line_processed[-1].split("\n")[0],
                                    "arg1_id": str(key - 2),
                                    "arg2_id": str(key - 1),
                                    "normalized": [],
                                }
                            )
                            key += 1
                        elif line_processed[2] == "concept":
                            data["entities"].append(
                                {
                                    "id": str(key),
                                    "offsets": [
                                        [
                                            int(line_processed[4]),
                                            int(line_processed[5]),
                                        ]
                                    ],
                                    "text": [
                                        full_text[
                                            int(line_processed[4]) : int(
                                                line_processed[5]
                                            )
                                        ]
                                    ],
                                    "type": line_processed[-1].split("\n")[0],
                                    "normalized": [],
                                }
                            )
                            key += 1
                    yield key, data
                    key += 1
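
For context, a minimal usage sketch (not part of the commit): assuming the script is published on the Hub as bigbio/euadr, the two configurations defined above can be loaded as shown below. The config names come from BUILDER_CONFIGS, and recent releases of the datasets library may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# "euadr_source": one record per abstract with pmid, title, abstract,
# and the raw tab-separated annotation lines
source = load_dataset("bigbio/euadr", name="euadr_source", split="train")

# "euadr_bigbio_kb": harmonized passages / entities / relations
kb = load_dataset("bigbio/euadr", name="euadr_bigbio_kb", split="train")

print(source[0]["pmid"])
print(kb[0]["relations"][:2])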