File size: 3,386 Bytes
eda0038
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe8bfda
 
eda0038
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe8bfda
eda0038
fe8bfda
eda0038
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# USAGE: this loader script builds an image dataset that is NOT hosted on HuggingFace;
#        it points to the original remote files, which are downloaded to generate the dataset.

import csv
import os
import datasets
import io
import tarfile

# TODO: Add citation
# Find for instance the citation on arxiv or on the dataset repo/website
# _CITATION = """\
# @InProceedings{huggingface:dataset,
# title = {A great new dataset},
# author={huggingface, Inc.
# },
# year={2020}
# }
# """

# Free-text description shown on the HuggingFace dataset card.
_DESCRIPTION = """\
Images taken from the Sage Waggle Node's top camera and the solar irradiance values were taken from the Argonne National Laboratory 
tower readings. We made sure to exclude night time photos since there is no sun and we exclusively used summer-time photos as we wanted 
to stick to a seasonal model that would be able to make estimates more consistently. Furthermore we also eventually downsized the images 
original 2000x2000 images to 500x500 images since the training was taking a bit too long when the images were larger.
"""
# Project homepage shown on the dataset card.
_HOMEPAGE = "https://sagecontinuum.org/"


_LICENSE = "MIT"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method);
# here it is a single tar archive containing one CSV of readings plus the JPEG images.
_URLS = "https://web.lcrc.anl.gov/public/waggle/datasets/solar-irradiance-sample-224.tar"


class SolarIrradianceDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for Sage Waggle solar-irradiance sky images.

    Streams a remote tar archive that contains one CSV mapping image
    paths to irradiance readings, plus the JPEG images themselves, and
    yields ``{"image", "irradiance"}`` examples.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return dataset metadata: description, features, homepage, license."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "irradiance": datasets.Value("float32"),
                }
            ),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the tar archive and expose it as a single 'full' split."""
        data_dir = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name="full",
                # These kwargs will be passed to _generate_examples.
                # iter_archive streams (path, file-object) pairs from the
                # tar without extracting it to disk.
                gen_kwargs={
                    "files": dl_manager.iter_archive(data_dir)
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, files):
        """Yield ``(key, example)`` pairs from the streamed archive.

        ``files`` is a ONE-SHOT stream, so each member must be read as it
        is encountered. The CSV is parsed once into a basename->value
        lookup table (the previous implementation re-parsed the CSV for
        every image — quadratic — and dropped any image stored before the
        CSV in the archive). Images seen before the CSV are buffered in
        memory until the CSV has been parsed.
        """
        irradiance_by_name = None  # basename -> float irradiance, set once the CSV is read
        pending = []  # (path, bytes) for images encountered before the CSV

        for file_path, file_obj in files:
            filename = os.path.basename(file_path)
            if filename.endswith(".csv") and irradiance_by_name is None:
                csv_text = file_obj.read().decode("utf-8")
                irradiance_by_name = {
                    os.path.basename(row["image"]): float(row["irradiance"])
                    for row in csv.DictReader(csv_text.strip().splitlines())
                }
                # Flush any images that arrived before the CSV.
                for path, data in pending:
                    name = os.path.basename(path)
                    if name in irradiance_by_name:
                        yield path, {
                            "image": {"path": path, "bytes": data},
                            "irradiance": irradiance_by_name[name],
                        }
                pending = []
            elif filename.endswith(".jpg"):
                if irradiance_by_name is None:
                    # CSV not seen yet; must read NOW — the member's file
                    # object becomes invalid once iteration advances.
                    pending.append((file_path, file_obj.read()))
                elif filename in irradiance_by_name:
                    yield file_path, {
                        "image": {"path": file_path, "bytes": file_obj.read()},
                        "irradiance": irradiance_by_name[filename],
                    }