smeoni committed on
Commit 00399e7 · 1 Parent(s): d9be4b5

Upload 2 files

Files changed (2):
  1. README.md +61 -76
  2. e3c.py +11 -56
README.md CHANGED
@@ -1,84 +1,69 @@
 ---
 dataset_info:
   features:
-  - name: text
-    dtype: string
-  - name: tokens
-    sequence: string
-  - name: tokens_offsets
-    sequence:
-      sequence: int32
-  - name: clinical_entity_tags
-    sequence:
-      class_label:
-        names:
-          "0": O
-          "1": B-CLINENTITY
-          "2": I-CLINENTITY
-  - name: temporal_information_tags
-    sequence:
-      class_label:
-        names:
-          "0": O
-          "1": B-EVENT
-          "2": B-ACTOR
-          "3": B-BODYPART
-          "4": B-TIMEX3
-          "5": B-RML
-          "6": I-EVENT
-          "7": I-ACTOR
-          "8": I-BODYPART
-          "9": I-TIMEX3
-          "10": I-RML
+  - name: text
+    dtype: string
+  - name: tokens
+    sequence: string
+  - name: tokens_offsets
+    sequence:
+      sequence: int32
+  - name: clinical_entity_tags
+    sequence:
+      class_label:
+        names:
+          '0': O
+          '1': B-CLINENTITY
+          '2': I-CLINENTITY
+  - name: temporal_information_tags
+    sequence:
+      class_label:
+        names:
+          '0': O
+          '1': B-EVENT
+          '2': B-ACTOR
+          '3': B-BODYPART
+          '4': B-TIMEX3
+          '5': B-RML
+          '6': I-EVENT
+          '7': I-ACTOR
+          '8': I-BODYPART
+          '9': I-TIMEX3
+          '10': I-RML
   config_name: e3c
   splits:
-  - name: en.layer1
-    num_bytes: 1280534
-    num_examples: 1520
-  - name: en.layer2
-    num_bytes: 2566638
-    num_examples: 2873
-  - name: en.layer2.validation
-    num_bytes: 290490
-    num_examples: 334
-  - name: es.layer1
-    num_bytes: 1262006
-    num_examples: 1134
-  - name: es.layer2
-    num_bytes: 2524461
-    num_examples: 2347
-  - name: es.layer2.validation
-    num_bytes: 278045
-    num_examples: 261
-  - name: eu.layer1
-    num_bytes: 1537670
-    num_examples: 3126
-  - name: eu.layer2
-    num_bytes: 853766
-    num_examples: 1594
-  - name: eu.layer2.validation
-    num_bytes: 227044
-    num_examples: 468
-  - name: fr.layer1
-    num_bytes: 1275362
-    num_examples: 1109
-  - name: fr.layer2
-    num_bytes: 2581993
-    num_examples: 2389
-  - name: fr.layer2.validation
-    num_bytes: 275163
-    num_examples: 293
-  - name: it.layer1
-    num_bytes: 1299388
-    num_examples: 1146
-  - name: it.layer2
-    num_bytes: 2697483
-    num_examples: 2436
-  - name: it.layer2.validation
-    num_bytes: 291866
-    num_examples: 275
+  - name: en.layer1
+    num_bytes: 1273610
+    num_examples: 1520
+  - name: en.layer2
+    num_bytes: 2550153
+    num_examples: 2873
+  - name: es.layer1
+    num_bytes: 1252571
+    num_examples: 1134
+  - name: es.layer2
+    num_bytes: 2498266
+    num_examples: 2347
+  - name: eu.layer1
+    num_bytes: 1519021
+    num_examples: 3126
+  - name: eu.layer2
+    num_bytes: 839955
+    num_examples: 1594
+  - name: fr.layer1
+    num_bytes: 1258738
+    num_examples: 1109
+  - name: fr.layer2
+    num_bytes: 2628628
+    num_examples: 2389
+  - name: it.layer1
+    num_bytes: 1276534
+    num_examples: 1146
+  - name: it.layer2
+    num_bytes: 2641257
+    num_examples: 2436
   download_size: 230213492
-  dataset_size: 19241909
+  dataset_size: 17738733
 ---
 
 # Dataset Card for E3C
@@ -107,4 +92,4 @@ information about clinical entities based on medical taxonomies, to be used for
   url = {https://uts.nlm.nih.gov/uts/umls/home},
   year = {2021},
 }
-```
+```
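With the `*.layer2.validation` splits dropped from the metadata, each language now exposes only its `layer1` and `layer2` splits. A minimal sketch of loading one of them via the loading script, assuming a `datasets` version that still supports script-based datasets; the local script path is an assumption, as the Hub repo id is not shown in this commit:

```python
from datasets import load_dataset

# "./e3c.py" is a hypothetical local path to the script from this commit;
# substitute the Hub repo id if the dataset is hosted there.
dataset = load_dataset("./e3c.py", name="e3c", split="en.layer1")

# Inspect the clinical-entity label set declared in the metadata above.
print(dataset.features["clinical_entity_tags"])
print(dataset[0]["tokens"][:10])
```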
e3c.py CHANGED
@@ -4,6 +4,10 @@ from typing import Iterator
 import datasets
 from bs4 import BeautifulSoup, ResultSet
 from datasets import DownloadManager
+from syntok.tokenizer import Tokenizer
+
+tok = Tokenizer()
+
 
 _CITATION = """\
 @report{Magnini2021,
@@ -132,17 +136,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                     ),
                 },
             ),
-            datasets.SplitGenerator(
-                name="en.layer2.validation",
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir,
-                        "E3C-Corpus-2.0.0/data_validation",
-                        "English",
-                        "layer2",
-                    ),
-                },
-            ),
             datasets.SplitGenerator(
                 name="es.layer1",
                 gen_kwargs={
@@ -165,17 +158,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                     ),
                 },
             ),
-            datasets.SplitGenerator(
-                name="es.layer2.validation",
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir,
-                        "E3C-Corpus-2.0.0/data_validation",
-                        "Spanish",
-                        "layer2",
-                    ),
-                },
-            ),
             datasets.SplitGenerator(
                 name="eu.layer1",
                 gen_kwargs={
@@ -198,17 +180,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                     ),
                 },
             ),
-            datasets.SplitGenerator(
-                name="eu.layer2.validation",
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir,
-                        "E3C-Corpus-2.0.0/data_validation",
-                        "Basque",
-                        "layer2",
-                    ),
-                },
-            ),
             datasets.SplitGenerator(
                 name="fr.layer1",
                 gen_kwargs={
@@ -231,17 +202,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                     ),
                 },
             ),
-            datasets.SplitGenerator(
-                name="fr.layer2.validation",
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir,
-                        "E3C-Corpus-2.0.0/data_validation",
-                        "French",
-                        "layer2",
-                    ),
-                },
-            ),
             datasets.SplitGenerator(
                 name="it.layer1",
                 gen_kwargs={
@@ -264,17 +224,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                     ),
                 },
             ),
-            datasets.SplitGenerator(
-                name="it.layer2.validation",
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir,
-                        "E3C-Corpus-2.0.0/data_validation",
-                        "Italian",
-                        "layer2",
-                    ),
-                },
-            ),
         ]
 
     @staticmethod
@@ -339,10 +288,16 @@ class E3C(datasets.GeneratorBasedBuilder):
         guid = 0
         for content in self.get_parsed_data(filepath):
             for sentence in content["SENTENCE"]:
+                tokens = [
+                    (token.offset + sentence[0], token.offset + sentence[0] + len(token.value),
+                     token.value)
+                    for token in list(tok.tokenize(sentence[-1]))
+                ]
+
                 filtered_tokens = list(
                     filter(
                         lambda token: token[0] >= sentence[0] and token[1] <= sentence[1],
-                        content["TOKENS"],
+                        tokens,
                     )
                 )
                 tokens_offsets = [
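The last hunk replaces the precomputed `content["TOKENS"]` lookup with on-the-fly tokenization: syntok token offsets are sentence-local, so the new code shifts them by `sentence[0]` into document coordinates before the existing span filter runs. A minimal sketch of that offset arithmetic, using a hypothetical `(doc_start, doc_end, text)` tuple shaped like the loader's `content["SENTENCE"]` entries:

```python
from syntok.tokenizer import Tokenizer

tok = Tokenizer()

# Hypothetical sentence tuple: starts at document offset 100.
sentence = (100, 135, "Patient reports chest pain at rest.")

for token in tok.tokenize(sentence[-1]):
    start = token.offset + sentence[0]  # shift sentence-local offset into document coordinates
    end = start + len(token.value)
    print((start, end, token.value))
# -> (100, 107, 'Patient'), (108, 115, 'reports'), ...
```

Because the shifted spans always fall inside `[sentence[0], sentence[1]]`, the unchanged `filter(...)` now acts as a safety check rather than a selector over document-wide tokens.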