# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocBank-style document layout dataset: per-page word tokens, bounding boxes, the original page image, and word-level layout labels."""

import csv
import os
from glob import glob

import datasets
import numpy as np
from PIL import Image
from transformers import LayoutXLMTokenizerFast

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
Document layout dataset providing, for each page, the extracted word tokens, their bounding boxes,
the original page image, and word-level layout labels (abstract, author, caption, equation, figure,
footer, paragraph, reference, section, table, title). Two configurations are available: a resized
DocBank sample and the GeoCite train/test data.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the license for the dataset here if you can find it
_LICENSE = ""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
_URLS = {
    "sample": "http://hyperion.bbirke.de/data/docbank/sample_resized.zip",
    "data": {
        "train": "http://hyperion.bbirke.de/data/geocite/train.zip",
        "test": "http://hyperion.bbirke.de/data/geocite/test.zip",
    },
}

_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "words": datasets.Sequence(datasets.Value("string")),
        "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # "fonts": datasets.Sequence(datasets.Value("string")),
        # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
        "original_image": datasets.features.Image(),
        "dataset": datasets.Value("string"),
        # "labels": datasets.Sequence(feature=datasets.Value(dtype='int64'))
        # Note: "date" and "list" are folded into "paragraph" in _generate_examples,
        # so they do not appear in the ClassLabel names below.
        "labels": datasets.Sequence(
            datasets.features.ClassLabel(
                names=['abstract', 'author', 'caption', 'equation', 'figure', 'footer', 'paragraph',
                       'reference', 'section', 'table', 'title']
                # names=['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list',
                #        'paragraph', 'reference', 'section', 'table', 'title']
            )
        ),
    }
)
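
# Illustrative sketch only: a single generated example is expected to follow _FEATURES above,
# roughly like the (invented) record below.
# {
#     "id": "file_0",
#     "words": ["Introduction", "We", "study", ...],
#     "bbox": [[34, 60, 180, 75], [34, 90, 58, 102], ...],
#     "original_image": <page image>,
#     "dataset": "geocite",
#     "labels": ["section", "paragraph", "paragraph", ...],
# }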


def load_image(image_path, size=None):
    """Load an image as a channels-first BGR numpy array and return it together with the original (width, height)."""
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    if size is not None:
        # resize to a square of side `size`
        image = image.resize((size, size))
    image = np.asarray(image)
    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
    image = image.transpose(2, 0, 1)  # move channels to the first dimension (C, H, W)
    return image, (w, h)
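
# Usage sketch (illustrative only; the file name is hypothetical):
#   arr, (w, h) = load_image("page_0.jpg", size=224)
#   arr.shape == (3, 224, 224)  # channels-first BGR array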


# def normalize_bbox(bbox, size):
#     return [
#         int(1000 * int(bbox[0]) / size[0]),
#         int(1000 * int(bbox[1]) / size[1]),
#         int(1000 * int(bbox[2]) / size[0]),
#         int(1000 * int(bbox[3]) / size[1]),
#     ]
#
#
# def simplify_bbox(bbox):
#     return [
#         min(bbox[0::2]),
#         min(bbox[1::2]),
#         max(bbox[2::2]),
#         max(bbox[3::2]),
#     ]
#
#
# def merge_bbox(bbox_list):
#     x0, y0, x1, y1 = list(zip(*bbox_list))
#     return [min(x0), min(y0), max(x1), max(y1)]


class Docbank(datasets.GeneratorBasedBuilder):
    """DocBank-style document layout dataset with word tokens, bounding boxes, page images, and layout labels."""

    CHUNK_SIZE = 512
    VERSION = datasets.Version("1.0.0")

    # This dataset has multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig
    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'sample')
    # data = datasets.load_dataset('my_dataset', 'data')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sample", version=VERSION,
                               description="Resized DocBank sample pages"),
        datasets.BuilderConfig(name="data", version=VERSION,
                               description="GeoCite train/test pages"),
    ]
    # DEFAULT_CONFIG_NAME = "sample"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    TOKENIZER = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base", only_label_first_subword=False)
    LABELS = ['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list', 'paragraph',
              'reference', 'section', 'table', 'title']
    ID2LABEL = dict(enumerate(LABELS))
    LABEL2ID = {v: k for k, v in enumerate(LABELS)}
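    # Illustrative mapping check (follows from LABELS above):
    #   ID2LABEL[0] == "abstract", LABEL2ID["title"] == 12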

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset.
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=_FEATURES,
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # This method downloads/extracts the data and defines the splits, depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any nested list/dict of URLs and returns the same structure with each URL replaced by a path to the local file.
        # By default, archives are extracted and the path to the cached extraction folder is returned instead of the archive.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        # NOTE: the path handling below assumes the nested "data" URLs (a dict with "train"/"test");
        # the flat "sample" URL would need its own handling.
        train_txts = glob(os.path.join(data_dir['train'], 'train', 'txt', '*.csv'))
        train_data = [(txt, os.path.join(data_dir['train'], 'train', 'img', os.path.basename(txt)[:-4] + '.jpg'))
                      for txt in train_txts]
        test_txts = glob(os.path.join(data_dir['test'], 'test', 'txt', '*.csv'))
        test_data = [(txt, os.path.join(data_dir['test'], 'test', 'img', os.path.basename(txt)[:-4] + '.jpg'))
                     for txt in test_txts]
        # with open(os.path.join(data_dir, "train.csv")) as f:
        #     files_train = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
        #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
        #                    csv.DictReader(f, skipinitialspace=True)]
        # with open(os.path.join(data_dir, "test.csv")) as f:
        #     files_test = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
        #                    'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
        #                   csv.DictReader(f, skipinitialspace=True)]
        # with open(os.path.join(data_dir, "validation.csv")) as f:
        #     files_validation = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
        #                          'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
        #                         csv.DictReader(f, skipinitialspace=True)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": train_data,
                    "split": "train",
                },
            ),
            # datasets.SplitGenerator(
            #     name=datasets.Split.VALIDATION,
            #     # These kwargs will be passed to _generate_examples
            #     gen_kwargs={
            #         "filepath": files_validation,
            #         "split": "validation",
            #     },
            # ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": test_data,
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # This method handles the input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        key = 0
        for f_fp_txt, f_fp_img in filepath:
            # Derive the source dataset tag from the file name (the part of the stem after the last underscore).
            dataset = f_fp_txt.split(".")[-2].split("_")[-1]
            f_id = key
            tokens = []
            bboxes = []
            # rgbs = []
            # fonts = []
            labels = []
            # image, size = load_image(f_fp_img, size=224)
            original_image, _ = load_image(f_fp_img)
            try:
                with open(f_fp_txt, encoding='utf-8') as csvfile:
                    reader = csv.DictReader(csvfile, delimiter=',')
                    for row in reader:
                        # normalized_bbox = normalize_bbox(row[1:5], size)
                        normalized_bbox = [int(row['x0']), int(row['y0']), int(row['x1']), int(row['y1'])]
                        tokens.append(row['token'])
                        bboxes.append(normalized_bbox)
                        label = row['label']
                        # Fold "list" and "date" into "paragraph" so labels match the ClassLabel names in _FEATURES.
                        if label in ("list", "date"):
                            label = "paragraph"
                        labels.append(label)
                        # tokenized_input = self.TOKENIZER(
                        #     row[0],
                        #     add_special_tokens=False,
                        #     return_offsets_mapping=False,
                        #     return_attention_mask=False,
                        #     max_length=512, truncation=True
                        # )
                        #
                        # for tkn in tokenized_input['input_ids']:
                        #     tokens.append(tkn)
                        #     bboxes.append(normalized_bbox)
                        #     # rgbs.append(row[5:8])
                        #     # fonts.append(row[8])
                        #     labels.append(row[9])
            except Exception:
                # Skip pages whose CSV is missing, malformed, or otherwise unreadable.
                continue
            # processed = self.TOKENIZER(
            #     tokens,
            #     boxes=bboxes,
            #     word_labels=labels,
            #     add_special_tokens=False,
            #     return_offsets_mapping=False,
            #     return_attention_mask=False,
            # )
            # for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
            #     split_tokens = tokens[index:index + self.CHUNK_SIZE]
            #     split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
            #     # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
            #     # split_fonts = fonts[index:index + self.CHUNK_SIZE]
            #     split_labels = labels[index:index + self.CHUNK_SIZE]
            # tokenized = self.TOKENIZER(processed['words'], boxes=processed['boxes'])
            yield key, {
                "id": f"file_{f_id}",
                "words": tokens,
                "bbox": bboxes,
                # "RGBs": split_rgbs,
                # "fonts": split_fonts,
                # "image": image,
                "original_image": original_image,
                "dataset": dataset,
                "labels": labels,
            }
            key += 1
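

# Minimal usage sketch (illustrative only, not executed on import): load this script with the
# "data" configuration and inspect one training example. Whether script-based loading works
# depends on the installed `datasets` version.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "data")
    example = ds["train"][0]
    print(example["id"], example["dataset"], len(example["words"]), example["labels"][:5])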