phlobo committed · verified
Commit b29f3d4 · 1 Parent(s): fd58c45

Update craft based on git version c6bfb36

Files changed (3)
  1. README.md +50 -0
  2. bigbiohub.py +592 -0
  3. craft.py +334 -0
README.md ADDED
@@ -0,0 +1,50 @@
---
language:
- en
bigbio_language:
- English
license: cc-by-3.0
multilinguality: monolingual
bigbio_license_shortname: CC_BY_3p0_US
pretty_name: CRAFT
homepage: https://github.com/UCDenver-ccp/CRAFT
bigbio_pubmed: True
bigbio_public: True
bigbio_tasks:
- NAMED_ENTITY_RECOGNITION
- NAMED_ENTITY_DISAMBIGUATION
---


# Dataset Card for CRAFT

## Dataset Description

- **Homepage:** https://github.com/UCDenver-ccp/CRAFT
- **Pubmed:** True
- **Public:** True
- **Tasks:** NER, NED


This dataset contains the CRAFT corpus, a collection of 97 articles from the PubMed Central Open Access subset,
each of which has been annotated along a number of different axes spanning structural, coreference, and concept
annotation.


## Citation Information

```
@article{bada2012concept,
  title={Concept annotation in the CRAFT corpus},
  author={Bada, Michael and Eckert, Miriam and Evans, Donald and Garcia, Kristin and Shipley, Krista and Sitnikov, Dmitry and Baumgartner, William A and Cohen, K Bretonnel and Verspoor, Karin and Blake, Judith A and others},
  journal={BMC bioinformatics},
  volume={13},
  number={1},
  pages={1--20},
  year={2012},
  publisher={BioMed Central}
}
```
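
As a quick smoke test that the card and the loader below line up, the dataset can be loaded through the `datasets` library. A minimal sketch, assuming the three committed files sit together in a local directory named `craft` (the config names come from `craft.py` below):

```python
from datasets import load_dataset

# "craft" is the local directory holding craft.py and bigbiohub.py;
# recent versions of `datasets` require trust_remote_code for script loaders.
ds = load_dataset("craft", name="craft_source", trust_remote_code=True)

print(ds)                      # train / validation / test splits
print(ds["train"][0]["pmid"])  # one CRAFT article per example
```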
bigbiohub.py ADDED
@@ -0,0 +1,592 @@
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
import logging
from pathlib import Path
from types import SimpleNamespace
from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple

import datasets

if TYPE_CHECKING:
    import bioc

logger = logging.getLogger(__name__)


BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")


@dataclass
class BigBioConfig(datasets.BuilderConfig):
    """BuilderConfig for BigBio."""

    name: str = None
    version: datasets.Version = None
    description: str = None
    schema: str = None
    subset_id: str = None


class Tasks(Enum):
    NAMED_ENTITY_RECOGNITION = "NER"
    NAMED_ENTITY_DISAMBIGUATION = "NED"
    EVENT_EXTRACTION = "EE"
    RELATION_EXTRACTION = "RE"
    COREFERENCE_RESOLUTION = "COREF"
    QUESTION_ANSWERING = "QA"
    TEXTUAL_ENTAILMENT = "TE"
    SEMANTIC_SIMILARITY = "STS"
    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
    PARAPHRASING = "PARA"
    TRANSLATION = "TRANSL"
    SUMMARIZATION = "SUM"
    TEXT_CLASSIFICATION = "TXTCLASS"


entailment_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "premise": datasets.Value("string"),
        "hypothesis": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

pairs_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

qa_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "question_id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "question": datasets.Value("string"),
        "type": datasets.Value("string"),
        "choices": [datasets.Value("string")],
        "context": datasets.Value("string"),
        "answer": datasets.Sequence(datasets.Value("string")),
    }
)

text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "labels": [datasets.Value("string")],
    }
)

text2text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "text_1_name": datasets.Value("string"),
        "text_2_name": datasets.Value("string"),
    }
)

kb_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "passages": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
            }
        ],
        "entities": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "events": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                # refers to the text_bound_annotation of the trigger
                "trigger": {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                },
                "arguments": [
                    {
                        "role": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "coreferences": [
            {
                "id": datasets.Value("string"),
                "entity_ids": datasets.Sequence(datasets.Value("string")),
            }
        ],
        "relations": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "arg1_id": datasets.Value("string"),
                "arg2_id": datasets.Value("string"),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
    }
)

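# Illustration (not part of the original file): a minimal record that
# conforms to `kb_features`. All ids must be globally unique, and unused
# annotation layers are kept as empty lists rather than omitted:
#
#     example = {
#         "id": "0",
#         "document_id": "12345",
#         "passages": [
#             {"id": "1", "type": "abstract",
#              "text": ["BRCA1 is a tumor suppressor."], "offsets": [[0, 28]]}
#         ],
#         "entities": [
#             {"id": "2", "type": "gene", "text": ["BRCA1"], "offsets": [[0, 5]],
#              "normalized": [{"db_name": "NCBIGene", "db_id": "672"}]}
#         ],
#         "events": [], "coreferences": [], "relations": [],
#     }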

TASK_TO_SCHEMA = {
    Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
    Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
    Tasks.EVENT_EXTRACTION.name: "KB",
    Tasks.RELATION_EXTRACTION.name: "KB",
    Tasks.COREFERENCE_RESOLUTION.name: "KB",
    Tasks.QUESTION_ANSWERING.name: "QA",
    Tasks.TEXTUAL_ENTAILMENT.name: "TE",
    Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
    Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
    Tasks.PARAPHRASING.name: "T2T",
    Tasks.TRANSLATION.name: "T2T",
    Tasks.SUMMARIZATION.name: "T2T",
    Tasks.TEXT_CLASSIFICATION.name: "TEXT",
}

SCHEMA_TO_TASKS = defaultdict(set)
for task, schema in TASK_TO_SCHEMA.items():
    SCHEMA_TO_TASKS[schema].add(task)
SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)

VALID_TASKS = set(TASK_TO_SCHEMA.keys())
VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())

SCHEMA_TO_FEATURES = {
    "KB": kb_features,
    "QA": qa_features,
    "TE": entailment_features,
    "T2T": text2text_features,
    "TEXT": text_features,
    "PAIRS": pairs_features,
}

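# Illustration (not part of the original file): resolving the harmonized
# features for a dataset from its supported tasks. CRAFT's NER and NED
# tasks both map to the "KB" schema:
#
#     schemas = {TASK_TO_SCHEMA[t.name] for t in (Tasks.NAMED_ENTITY_RECOGNITION,
#                                                 Tasks.NAMED_ENTITY_DISAMBIGUATION)}
#     assert schemas == {"KB"}
#     features = SCHEMA_TO_FEATURES[schemas.pop()]  # == kb_features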
def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:

    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]

    text = ann.text

    if len(offsets) > 1:
        i = 0
        texts = []
        for start, end in offsets:
            chunk_len = end - start
            texts.append(text[i : chunk_len + i])
            i += chunk_len
            while i < len(text) and text[i] == " ":
                i += 1
    else:
        texts = [text]

    return offsets, texts

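# Illustration (not part of the original file): for an annotation over
# "mouse and human cortex" whose locations cover "mouse" (offset 0, length 5)
# and "cortex" (offset 16, length 6), and whose combined text is
# "mouse cortex", the function returns
#     ([(0, 5), (16, 22)], ["mouse", "cortex"])
# i.e. the concatenated annotation text is split back into one chunk per
# location, skipping the joining spaces.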

def remove_prefix(a: str, prefix: str) -> str:
    if a.startswith(prefix):
        a = a[len(prefix) :]
    return a


def parse_brat_file(
    txt_file: Path,
    annotation_file_suffixes: List[str] = None,
    parse_notes: bool = False,
) -> Dict:
    """
    Parse a brat file into the schema defined below.
    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
    Will include annotator notes, when `parse_notes == True`.
    brat_features = datasets.Features(
        {
            "id": datasets.Value("string"),
            "document_id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                {
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                    "text": datasets.Sequence(datasets.Value("string")),
                    "type": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ],
            "events": [  # E line in brat
                {
                    "trigger": datasets.Value(
                        "string"
                    ),  # refers to the text_bound_annotation of the trigger,
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "arguments": datasets.Sequence(
                        {
                            "role": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                        }
                    ),
                }
            ],
            "relations": [  # R line in brat
                {
                    "id": datasets.Value("string"),
                    "head": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "tail": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "type": datasets.Value("string"),
                }
            ],
            "equivalences": [  # Equiv line in brat
                {
                    "id": datasets.Value("string"),
                    "ref_ids": datasets.Sequence(datasets.Value("string")),
                }
            ],
            "attributes": [  # M or A lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "value": datasets.Value("string"),
                }
            ],
            "normalizations": [  # N lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "resource_name": datasets.Value(
                        "string"
                    ),  # Name of the resource, e.g. "Wikipedia"
                    "cuid": datasets.Value(
                        "string"
                    ),  # ID in the resource, e.g. 534366
                    "text": datasets.Value(
                        "string"
                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                }
            ],
            ### OPTIONAL: Only included when `parse_notes == True`
            "notes": [  # '#' lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ],
        },
    )
    """

    example = {}
    example["document_id"] = txt_file.with_suffix("").name
    with txt_file.open() as f:
        example["text"] = f.read()

    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
    # for event extraction
    if annotation_file_suffixes is None:
        annotation_file_suffixes = [".a1", ".a2", ".ann"]

    if len(annotation_file_suffixes) == 0:
        raise AssertionError(
            "At least one suffix for the to-be-read annotation files should be given!"
        )

    ann_lines = []
    for suffix in annotation_file_suffixes:
        annotation_file = txt_file.with_suffix(suffix)
        try:
            with annotation_file.open() as f:
                ann_lines.extend(f.readlines())
        except Exception:
            continue

    example["text_bound_annotations"] = []
    example["events"] = []
    example["relations"] = []
    example["equivalences"] = []
    example["attributes"] = []
    example["normalizations"] = []

    if parse_notes:
        example["notes"] = []

    for line in ann_lines:
        line = line.strip()
        if not line:
            continue

        if line.startswith("T"):  # Text bound
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]
            ann["offsets"] = []
            span_str = remove_prefix(fields[1], (ann["type"] + " "))
            text = fields[2]
            for span in span_str.split(";"):
                start, end = span.split()
                ann["offsets"].append([int(start), int(end)])

            # Heuristically split text of discontiguous entities into chunks
            ann["text"] = []
            if len(ann["offsets"]) > 1:
                i = 0
                for start, end in ann["offsets"]:
                    chunk_len = end - start
                    ann["text"].append(text[i : chunk_len + i])
                    i += chunk_len
                    while i < len(text) and text[i] == " ":
                        i += 1
            else:
                ann["text"] = [text]

            example["text_bound_annotations"].append(ann)

        elif line.startswith("E"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

            ann["arguments"] = []
            for role_ref_id in fields[1].split()[1:]:
                argument = {
                    "role": (role_ref_id.split(":"))[0],
                    "ref_id": (role_ref_id.split(":"))[1],
                }
                ann["arguments"].append(argument)

            example["events"].append(ann)

        elif line.startswith("R"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]

            ann["head"] = {
                "role": fields[1].split()[1].split(":")[0],
                "ref_id": fields[1].split()[1].split(":")[1],
            }
            ann["tail"] = {
                "role": fields[1].split()[2].split(":")[0],
                "ref_id": fields[1].split()[2].split(":")[1],
            }

            example["relations"].append(ann)

        # '*' seems to be the legacy way to mark equivalences,
        # but I couldn't find any info on the current way
        # this might have to be adapted dependent on the brat version
        # of the annotation
        elif line.startswith("*"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["ref_ids"] = fields[1].split()[1:]

            example["equivalences"].append(ann)

        elif line.startswith("A") or line.startswith("M"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            info = fields[1].split()
            ann["type"] = info[0]
            ann["ref_id"] = info[1]

            if len(info) > 2:
                ann["value"] = info[2]
            else:
                ann["value"] = ""

            example["attributes"].append(ann)

        elif line.startswith("N"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2]

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            ann["resource_name"] = info[2].split(":")[0]
            ann["cuid"] = info[2].split(":")[1]
            example["normalizations"].append(ann)

        elif parse_notes and line.startswith("#"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            example["notes"].append(ann)

    return example

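# Illustration (not part of the original file): parsing a toy brat document.
# Assuming "doc.txt" contains "BRCA1 causes cancer." and a sibling "doc.ann"
# contains the single line "T1\tGene 0 5\tBRCA1", then
#
#     parse = parse_brat_file(Path("doc.txt"))
#
# gives parse["text_bound_annotations"] ==
#     [{"id": "T1", "type": "Gene", "offsets": [[0, 5]], "text": ["BRCA1"]}]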

def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
    """
    Transform a brat parse (conforming to the standard brat schema) obtained with
    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
    :param brat_parse:
    """

    unified_example = {}

    # Prefix all ids with document id to ensure global uniqueness,
    # because brat ids are only unique within their document
    id_prefix = brat_parse["document_id"] + "_"

    # identical
    unified_example["document_id"] = brat_parse["document_id"]
    unified_example["passages"] = [
        {
            "id": id_prefix + "_text",
            "type": "abstract",
            "text": [brat_parse["text"]],
            "offsets": [[0, len(brat_parse["text"])]],
        }
    ]

    # get normalizations
    ref_id_to_normalizations = defaultdict(list)
    for normalization in brat_parse["normalizations"]:
        ref_id_to_normalizations[normalization["ref_id"]].append(
            {
                "db_name": normalization["resource_name"],
                "db_id": normalization["cuid"],
            }
        )

    # separate entities and event triggers
    unified_example["events"] = []
    non_event_ann = brat_parse["text_bound_annotations"].copy()
    for event in brat_parse["events"]:
        event = event.copy()
        event["id"] = id_prefix + event["id"]
        trigger = next(
            tr
            for tr in brat_parse["text_bound_annotations"]
            if tr["id"] == event["trigger"]
        )
        if trigger in non_event_ann:
            non_event_ann.remove(trigger)
        event["trigger"] = {
            "text": trigger["text"].copy(),
            "offsets": trigger["offsets"].copy(),
        }
        for argument in event["arguments"]:
            argument["ref_id"] = id_prefix + argument["ref_id"]

        unified_example["events"].append(event)

    unified_example["entities"] = []
    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
    for ann in non_event_ann:
        entity_ann = ann.copy()
        entity_ann["id"] = id_prefix + entity_ann["id"]
        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
        unified_example["entities"].append(entity_ann)

    # massage relations
    unified_example["relations"] = []
    skipped_relations = set()
    for ann in brat_parse["relations"]:
        if (
            ann["head"]["ref_id"] not in anno_ids
            or ann["tail"]["ref_id"] not in anno_ids
        ):
            skipped_relations.add(ann["id"])
            continue
        unified_example["relations"].append(
            {
                "arg1_id": id_prefix + ann["head"]["ref_id"],
                "arg2_id": id_prefix + ann["tail"]["ref_id"],
                "id": id_prefix + ann["id"],
                "type": ann["type"],
                "normalized": [],
            }
        )
    if len(skipped_relations) > 0:
        example_id = brat_parse["document_id"]
        logger.info(
            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
            f" Skip (for now): "
            f"{list(skipped_relations)}"
        )

    # get coreferences
    unified_example["coreferences"] = []
    for i, ann in enumerate(brat_parse["equivalences"], start=1):
        is_entity_cluster = True
        for ref_id in ann["ref_ids"]:
            if not ref_id.startswith("T"):  # not textbound -> no entity
                is_entity_cluster = False
            elif ref_id not in anno_ids:  # event trigger -> no entity
                is_entity_cluster = False
        if is_entity_cluster:
            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
            unified_example["coreferences"].append(
                {"id": id_prefix + str(i), "entity_ids": entity_ids}
            )
    return unified_example
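
The two brat helpers above are not used by `craft.py` (CRAFT ships knowtator XML rather than brat), but together they form a small brat-to-KB pipeline. A minimal sketch, assuming a hypothetical directory `brat_corpus/` of paired `.txt`/`.ann` files and `bigbiohub.py` importable as a plain module:

```python
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

# Hypothetical layout: brat_corpus/1234.txt with sibling 1234.ann, etc.
for txt_file in sorted(Path("brat_corpus").glob("*.txt")):
    brat_parse = parse_brat_file(txt_file)
    kb_example = brat_parse_to_bigbio_kb(brat_parse)
    print(kb_example["document_id"], len(kb_example["entities"]))
```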
craft.py ADDED
@@ -0,0 +1,334 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import os
from typing import Dict, Iterator, List
from xml.etree import ElementTree as ET

import datasets

from .bigbiohub import BigBioConfig, Tasks, kb_features

_LOCAL = False
_LANGUAGES = ["English"]
_PUBMED = True
_CITATION = """\
@article{bada2012concept,
  title={Concept annotation in the CRAFT corpus},
  author={Bada, Michael and Eckert, Miriam and Evans, Donald and Garcia, Kristin and Shipley, Krista and Sitnikov, Dmitry and Baumgartner, William A and Cohen, K Bretonnel and Verspoor, Karin and Blake, Judith A and others},
  journal={BMC bioinformatics},
  volume={13},
  number={1},
  pages={1--20},
  year={2012},
  publisher={BioMed Central}
}
"""

_DATASETNAME = "craft"
_DISPLAYNAME = "CRAFT"

_DESCRIPTION = """
This dataset contains the CRAFT corpus, a collection of 97 articles from the PubMed Central Open Access subset,
each of which has been annotated along a number of different axes spanning structural, coreference, and concept
annotation. Due to limitations of the current schema, coreferences are not included in this dataloader.
They will be implemented in a future version.
"""

_HOMEPAGE = "https://github.com/UCDenver-ccp/CRAFT"

_LICENSE = "CC_BY_3p0_US"

_URL = {
    "source": "https://github.com/UCDenver-ccp/CRAFT/archive/refs/tags/v5.0.2.zip",
    "bigbio_kb": "https://github.com/UCDenver-ccp/CRAFT/archive/refs/tags/v5.0.2.zip",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]

_SOURCE_VERSION = "5.0.2"
_BIGBIO_VERSION = "1.0.0"

_CONCEPT_ANNOTATIONS = {
    "CHEBI": "Chemical Entities of Biological Interest",
    "CL": "Cell Ontology",
    "GO_BP": "Gene Ontology Biological Process",
    "GO_CC": "Gene Ontology Cellular Component",
    "GO_MF": "Gene Ontology Molecular Function",
    "MONDO": "MONDO Disease Ontology",
    "MOP": "Molecular Process Ontology",
    "NCBITaxon": "NCBI Taxonomy",
    "PR": "Protein Ontology",
    "SO": "Sequence Ontology",
    "UBERON": "Uberon",
}

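# Illustration (not part of the original file): in the source schema each
# annotation pairs character offsets and surface text with a concept from
# one of the ontologies above, e.g. (hypothetical values):
#
#     {"offsets": [[27, 32]], "text": ["brain"],
#      "db_name": "UBERON", "db_id": "UBERON:0000955"}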
logger = datasets.utils.logging.get_logger(__name__)


class CraftDataset(datasets.GeneratorBasedBuilder):
    """
    This dataset presents the concept annotations of the Colorado Richly Annotated Full-Text (CRAFT) Corpus, a
    collection of 97 full-length, open-access biomedical journal articles that have been annotated both semantically
    and syntactically to serve as a research resource for the biomedical natural-language-processing (NLP) community.
    CRAFT identifies all mentions of nearly all concepts from nine prominent biomedical ontologies and terminologies:
    - the Cell Type Ontology,
    - the Chemical Entities of Biological Interest ontology,
    - the NCBI Taxonomy,
    - the Protein Ontology,
    - the Sequence Ontology,
    - the entries of the Entrez Gene database, and
    - the three subontologies of the Gene Ontology.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    bigbio_schema_name = "kb"
    BUILDER_CONFIGS = [
        BigBioConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        BigBioConfig(
            name=f"{_DATASETNAME}_bigbio_{bigbio_schema_name}",
            version=BIGBIO_VERSION,
            description=f"{_DATASETNAME} BigBio schema",
            schema=f"bigbio_{bigbio_schema_name}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

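    # Illustration (not part of the original file): the configs above are
    # selected by name when loading, e.g.
    #     load_dataset("craft", name="craft_source")
    #     load_dataset("craft", name="craft_bigbio_kb")
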
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pmid": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "annotations": [
                        {
                            "offsets": datasets.Sequence([datasets.Value("int64")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "db_name": datasets.Value("string"),
                            "db_id": datasets.Value("string"),
                        }
                    ],
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            raise NotImplementedError(f"Schema {self.config.schema} not supported")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URL[self.config.schema]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "split": "test"},
            ),
        ]

    def get_splits(self, data_dir: str) -> Dict:
        """Load `dict[split, list[pmid]]`"""

        splits_dir = os.path.join(data_dir, f"CRAFT-{_SOURCE_VERSION}", "articles", "ids")
        splits = {}
        for split in ["train", "dev", "test"]:
            with open(os.path.join(splits_dir, f"craft-ids-{split}.txt")) as fp:
                split_name = "validation" if split == "dev" else split
                splits[split_name] = [line.strip() for line in fp.readlines()]
        return splits

    def get_texts(self, data_dir: str) -> Dict:
        """Load `dict[pmid, text]`"""

        texts_dir = os.path.join(data_dir, f"CRAFT-{_SOURCE_VERSION}", "articles", "txt")
        documents = {}
        for file in os.listdir(texts_dir):
            if not file.endswith(".txt"):
                continue

            pmid = file.replace(".txt", "")
            with open(os.path.join(texts_dir, file)) as fp:
                documents[pmid] = fp.read()

        return documents

    def _extract_mondo_annotations(self, path: str) -> Iterator[Dict]:
        """Extract MONDO annotations"""
        root = ET.parse(path)
        for a in root.findall("document/annotation"):
            span = a.find("span")
            assert span is not None

            start = span.attrib["start"]
            end = span.attrib["end"]

            ea = {
                "offsets": [[start, end]],
                "text": [span.text],
            }

            normalization = a.find("class")
            if normalization is not None:
                mondo_id = normalization.attrib["id"].replace("http://purl.obolibrary.org/obo/", "")
                mondo_id = mondo_id.replace("_", ":")
                ea["db_id"] = mondo_id

            yield ea

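    # Illustration (not part of the original file): the purl-to-CURIE rewrite
    # above turns e.g.
    #     "http://purl.obolibrary.org/obo/MONDO_0005070"
    # into
    #     "MONDO:0005070"
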
    def _extract_other_annotations(self, path: str) -> Iterator[Dict]:
        """Extract all other annotations (CHEBI, UBERON, ...)"""

        # NOTE: handle knowtator normalization format
        # <annotation>
        #   <mention id="UBERON_Instance_30000" />
        # </annotation>
        # <classMention id="UBERON_Instance_30166">
        #   <mentionClass id="UBERON:0002435">striatum</mentionClass>
        # </classMention>

        root = ET.parse(path)
        instance_to_db_id = {
            e.attrib["id"]: e.find("mentionClass").attrib["id"]
            for e in root.findall("classMention")
            if e.find("mentionClass") is not None
        }

        for a in root.findall("annotation"):
            span = a.find("span")
            assert span is not None
            offsets = [[span.attrib["start"], span.attrib["end"]] for span in a.findall("span")]
            text = a.find("spannedText").text.split(" ... ")
            ea = {"offsets": offsets, "text": text}
            mention = a.find("mention")
            db_id = None
            if mention is not None:
                instance = mention.attrib["id"]
                db_id = instance_to_db_id.get(instance)
            ea["db_id"] = db_id

            yield ea

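    # Illustration (not part of the original file): discontiguous annotations
    # carry several <span> elements plus a spannedText joined with " ... ",
    # e.g. spannedText "mouse ... cortex" with spans (0, 5) and (16, 22)
    # becomes {"offsets": [["0", "5"], ["16", "22"]], "text": ["mouse", "cortex"]}.
    # Offsets stay strings here; the declared integer features cast them
    # when examples are encoded.
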
    def get_annotations(self, data_dir: str) -> Dict:
        """Load `dict[pmid, annotations]`"""

        annotations_dir = os.path.join(data_dir, f"CRAFT-{_SOURCE_VERSION}", "concept-annotation")

        annotations: Dict = {}
        for concept in _CONCEPT_ANNOTATIONS:
            if concept == "MONDO":
                folder = os.path.join(
                    annotations_dir,
                    "MONDO",
                    "MONDO_without_genotype_annotations",
                    "knowtator-2",
                )
            else:
                folder = os.path.join(
                    annotations_dir,
                    concept,
                    concept,
                    "knowtator",
                )

            for file in sorted(os.listdir(folder)):
                pmid = file.replace(".xml", "").replace(".txt", "").replace(".knowtator", "")
                path = os.path.join(folder, file)

                if pmid not in annotations:
                    annotations[pmid] = []

                annotations_generator = (
                    self._extract_mondo_annotations(path)
                    if concept == "MONDO"
                    else self._extract_other_annotations(path)
                )

                for a in annotations_generator:
                    a["db_name"] = concept
                    annotations[pmid].append(a)

        return annotations

    def _generate_examples(self, data_dir: str, split: str):
        """Yields examples as (key, example) tuples."""

        splits = self.get_splits(data_dir=data_dir)
        texts = self.get_texts(data_dir=data_dir)
        annotations = self.get_annotations(data_dir=data_dir)

        if self.config.schema == "source":
            for pmid in splits[split]:
                example = {
                    "pmid": pmid,
                    "text": texts[pmid],
                    "annotations": annotations[pmid],
                }
                yield pmid, example

        elif self.config.schema == "bigbio_kb":
            uid = map(str, itertools.count(start=0, step=1))
            for pmid in splits[split]:
                example = {
                    "id": next(uid),
                    "document_id": pmid,
                    "passages": [
                        {
                            "id": next(uid),
                            "type": "text",
                            "text": [texts[pmid]],
                            "offsets": [[0, len(texts[pmid])]],
                        }
                    ],
                    "entities": [
                        {
                            "id": next(uid),
                            "offsets": a["offsets"],
                            "text": a["text"],
                            "type": a["db_name"],
                            "normalized": [{"db_name": a["db_name"], "db_id": a["db_id"]}],
                        }
                        for a in annotations[pmid]
                    ],
                    "events": [],
                    "coreferences": [],
                    "relations": [],
                }
                yield next(uid), example
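
End to end, the `bigbio_kb` branch above emits one document-level example per article, with every concept mention as a typed, normalized entity. A quick sanity check, sketched under the same local-loading assumption as in the dataset card:

```python
from datasets import load_dataset

# Hypothetical local load of the script above; recent `datasets` versions
# require trust_remote_code for script-based loaders.
ds = load_dataset("craft", name="craft_bigbio_kb", trust_remote_code=True)

entity = ds["train"][0]["entities"][0]
# Each entity is typed by its source ontology and carries a normalization,
# e.g. type "CHEBI" with a db_id from that ontology.
print(entity["type"], entity["normalized"])
```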