FranciscoLozDataScience committed on
Commit
eda0038
·
1 Parent(s): 5784b19

Upload 2 files

Files changed (2)
  1. README.md +14 -0
  2. solarirradiancedataset.py +90 -0
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ dataset_info:
+   features:
+   - name: image
+     dtype: image
+   - name: irradiance
+     dtype: float32
+   splits:
+   - name: full
+     num_bytes: 13466250
+     num_examples: 1000
+   download_size: 14234112
+   dataset_size: 13466250
+ ---
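
The card above declares a single `full` split with an `image` column and a float32 `irradiance` column. A minimal loading sketch, assuming the dataset is published under the committer's namespace (the repo id below is a guess) and that script-backed loading is enabled:

```python
from datasets import load_dataset

# Hypothetical repo id built from the committer's username; substitute the real Hub path.
ds = load_dataset(
    "FranciscoLozDataScience/solarirradiancedataset",
    split="full",
    trust_remote_code=True,  # required for script-based datasets in recent `datasets` releases
)

print(len(ds))              # the card reports 1000 examples
print(ds[0]["irradiance"])  # float value paired with the sky image
```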
solarirradiancedataset.py ADDED
@@ -0,0 +1,90 @@
+ # USAGE: this script creates an image dataset that is NOT hosted on HuggingFace; it points to the original
+ # files, which are downloaded to generate the dataset.
+
+ import csv
+ import os
+
+ import datasets
+
+ # TODO: Add citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ # _CITATION = """\
+ # @InProceedings{huggingface:dataset,
+ #   title = {A great new dataset},
+ #   author={huggingface, Inc.},
+ #   year={2020}
+ # }
+ # """
+
+ _DESCRIPTION = """\
+ Images were taken from the Sage Waggle Node's top camera, and the solar irradiance values were taken from the Argonne National Laboratory
+ tower readings. We excluded night-time photos, since there is no sun, and used summer-time photos exclusively, as we wanted
+ a seasonal model that could make estimates more consistently. We also eventually downsized the original 2000x2000 images
+ to 500x500, since training was taking too long with the larger images.
+ """
+ _HOMEPAGE = "https://sagecontinuum.org/"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ # _LICENSE = ""
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = "https://web.lcrc.anl.gov/public/waggle/datasets/solar-irradiance-sample-224.tar"
+
+
+ class SolarIrradianceDataset(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "irradiance": datasets.Value("float32"),
+                 }
+             ),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             # license=_LICENSE,
+             # Citation for the dataset
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name="full",
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "files": dl_manager.iter_archive(data_dir),
+                 },
+             )
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, files):
+         # `files` is a single-pass generator over the tar members, so the metadata CSV
+         # must appear before the image files in the archive: the first loop consumes
+         # members up to and including the CSV, and the second loop resumes from there.
+         for file_path, file_obj in files:
+             if file_path.endswith(".csv"):
+                 csv_contents = file_obj.read().decode("utf-8")
+                 break
+
+         # Parse the CSV once into a filename -> irradiance lookup table instead of
+         # re-reading it for every image.
+         irradiance_by_name = {
+             os.path.basename(row["image"]): row["irradiance"]
+             for row in csv.DictReader(csv_contents.strip().splitlines(), delimiter=",")
+         }
+
+         for file_path, file_obj in files:
+             filename = os.path.basename(file_path)
+             if filename.endswith(".jpg") and filename in irradiance_by_name:
+                 yield file_path, {
+                     "image": {"path": file_path, "bytes": file_obj.read()},
+                     "irradiance": float(irradiance_by_name[filename]),
+                 }
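
Before pushing, the builder can be exercised by loading the script from disk; a minimal local smoke-test sketch (it downloads the ~14 MB tar from `_URLS`, and the split name matches the `full` split declared above):

```python
from datasets import load_dataset

# Load the builder straight from the local script file.
ds = load_dataset("./solarirradiancedataset.py", split="full", trust_remote_code=True)

example = ds[0]
print(example["image"].size)   # decoded as a PIL.Image by the Image() feature
print(example["irradiance"])   # matched from the metadata CSV inside the archive
```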