changed tokenization
ref_seg_ger.py (+8 -3)
@@ -19,7 +19,7 @@ import os
 import numpy as np
 from PIL import Image
 from tokenizers import pre_tokenizers
-from tokenizers.pre_tokenizers import Digits, Split, Whitespace
+from tokenizers.pre_tokenizers import Digits, Split, Whitespace, Sequence
 import datasets
 from itertools import chain
 import pandas as pd
@@ -143,9 +143,14 @@ class RefSeg(datasets.GeneratorBasedBuilder):
 
     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
-
+    split_tokens = [".", ":", ",", ";", "/", "-", "(", ")"]
 
-    TOKENIZER = Whitespace()
+    TOKENIZER = Sequence([
+        Whitespace(),
+        Digits(),
+    ] + [Split(x, behavior="isolated") for x in split_tokens])
+
+    #TOKENIZER = Whitespace()
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset