batched tokenization
Browse files- docbank.py +7 -2
docbank.py
CHANGED
|
@@ -68,8 +68,10 @@ _FEATURES = datasets.Features(
|
|
| 68 |
"original_image": datasets.features.Image(),
|
| 69 |
#"labels": datasets.Sequence(feature=datasets.Value(dtype='int64'))
|
| 70 |
"labels": datasets.Sequence(datasets.features.ClassLabel(
|
| 71 |
-
names=['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list', 'paragraph',
|
| 72 |
'reference', 'section', 'table', 'title']
|
|
|
|
|
|
|
| 73 |
))
|
| 74 |
# These are the features of your dataset like images, labels ...
|
| 75 |
}
|
|
@@ -246,7 +248,10 @@ class Docbank(datasets.GeneratorBasedBuilder):
|
|
| 246 |
tokens.append(row['token'])
|
| 247 |
bboxes.append(normalized_bbox)
|
| 248 |
#print(f'Before: {row[9]}')
|
| 249 |
-
|
|
|
|
|
|
|
|
|
|
| 250 |
#print(f'After: {row[9]}')
|
| 251 |
# tokenized_input = self.TOKENIZER(
|
| 252 |
# row[0],
|
|
|
|
| 68 |
"original_image": datasets.features.Image(),
|
| 69 |
#"labels": datasets.Sequence(feature=datasets.Value(dtype='int64'))
|
| 70 |
"labels": datasets.Sequence(datasets.features.ClassLabel(
|
| 71 |
+
names=['abstract', 'author', 'caption', 'equation', 'figure', 'footer', 'paragraph',
|
| 72 |
'reference', 'section', 'table', 'title']
|
| 73 |
+
# names=['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list', 'paragraph',
|
| 74 |
+
# 'reference', 'section', 'table', 'title']
|
| 75 |
))
|
| 76 |
# These are the features of your dataset like images, labels ...
|
| 77 |
}
|
|
|
|
| 248 |
tokens.append(row['token'])
|
| 249 |
bboxes.append(normalized_bbox)
|
| 250 |
#print(f'Before: {row[9]}')
|
| 251 |
+
label = row['label']
|
| 252 |
+
if (label == "list") or (label == "date"):
|
| 253 |
+
label = "paragraph"
|
| 254 |
+
labels.append(label)
|
| 255 |
#print(f'After: {row[9]}')
|
| 256 |
# tokenized_input = self.TOKENIZER(
|
| 257 |
# row[0],
|