import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {plantations_segmentation},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The images are aerial photographs of agricultural plantations with crops such
as cabbage and zucchini. The dataset addresses agricultural tasks such as
plant detection and counting, health assessment, and irrigation planning.
Each photograph is provided with object-level and class-level segmentation
masks of the cabbage plants.
"""
_NAME = 'plantations_segmentation'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class PlantationsSegmentation(datasets.GeneratorBasedBuilder):
    """Small sample of image-text pairs"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image_id': datasets.Value('int32'),
                'image': datasets.Image(),
                'class_segmentation': datasets.Image(),
                'object_segmentation': datasets.Image(),
                'shapes': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
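        """Download the image/mask archives and the annotations CSV, then
        stream the archives lazily via `iter_archive` for the single train
        split."""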
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        class_segmentation_masks = dl_manager.download(
            f"{_DATA}class_segmentation.tar.gz")
        object_segmentation_masks = dl_manager.download(
            f"{_DATA}object_segmentation.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        class_segmentation_masks = dl_manager.iter_archive(
            class_segmentation_masks)
        object_segmentation_masks = dl_manager.iter_archive(
            object_segmentation_masks)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images,
                    'class_segmentation_masks': class_segmentation_masks,
                    'object_segmentation_masks': object_segmentation_masks,
                    'annotations': annotations
                }),
        ]

    def _generate_examples(self, images, class_segmentation_masks,
                           object_segmentation_masks, annotations):
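        """Pair each image with its class and object masks (the three archives
        are assumed to list files in matching order) and look up per-image
        metadata by file name in the annotations CSV."""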
        annotations_df = pd.read_csv(annotations)

        for idx, ((image_path, image), (class_segmentation_path,
                                        class_segmentation),
                  (object_segmentation_path,
                   object_segmentation)) in enumerate(
                       zip(images, class_segmentation_masks,
                           object_segmentation_masks)):
            yield idx, {
                'image_id':
                    annotations_df.loc[
                        annotations_df['image_name'] == image_path]
                    ['image_id'].values[0],
                "image": {
                    "path": image_path,
                    "bytes": image.read()
                },
                "class_segmentation": {
                    "path": class_segmentation_path,
                    "bytes": class_segmentation.read()
                },
                "object_segmentation": {
                    "path": object_segmentation_path,
                    "bytes": object_segmentation.read()
                },
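                # The raw polygon string can be very long, so only its first
                # 500 characters are kept.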
                'shapes':
                    annotations_df.loc[annotations_df['image_name'] ==
                                       image_path]['shapes'].values[0][:500] +
                    '...'
            }
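

# A minimal usage sketch (not part of the loading script itself), assuming the
# archives referenced by `_DATA` are reachable and this file is used as a
# standard Hugging Face loading script. Recent `datasets` releases require
# `trust_remote_code=True` for script-based datasets; the `__main__` guard
# keeps this from running when `datasets` imports the module.
if __name__ == "__main__":
    dataset = datasets.load_dataset(f"TrainingDataPro/{_NAME}",
                                    split="train",
                                    trust_remote_code=True)
    sample = dataset[0]
    print(sample["image_id"])
    print(sample["image"].size)  # decoded to a PIL.Image by datasets.Image()
    print(sample["shapes"][:100])  # truncated polygon annotation string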