import numpy as np |
|
import datasets |
|
from datasets import Value |
|
import pickle |
|
import pandas as pd |
|
_CITATION = """\ |
|
@misc{https://doi.org/10.48550/arxiv.2406.04928, |
|
doi = {10.48550/ARXIV.2406.04928}, |
|
url = {https://arxiv.org/abs/2406.04928}, |
|
author = {Sialelli, Ghjulia and Peters, Torben and Wegner, Jan D. and Schindler, Konrad}, |
|
keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), Image and Video Processing (eess.IV), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Electrical engineering, electronic engineering, information engineering, FOS: Electrical engineering, electronic engineering, information engineering}, |
|
title = {AGBD: A Global-scale Biomass Dataset}, |
|
publisher = {arXiv}, |
|
year = {2024}, |
|
copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
AGBD is a machine-learning-ready dataset of high-resolution (10 m), multi-modal satellite imagery, paired with aboveground biomass (AGB) reference values from NASA’s Global Ecosystem Dynamics Investigation (GEDI) mission.
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "https://creativecommons.org/licenses/by-nc/4.0/" |
|
|
|
|
|
feature_dtype = {'s2_num_days': Value('int16'), |
|
'gedi_num_days': Value('uint16'), |
|
'lat': Value('float32'), |
|
'lon': Value('float32'), |
|
"agbd_se": Value('float32'), |
|
"elev_lowes": Value('float32'), |
|
"leaf_off_f": Value('uint8'), |
|
"pft_class": Value('uint8'), |
|
"region_cla": Value('uint8'), |
|
"rh98": Value('float32'), |
|
"sensitivity": Value('float32'), |
|
"solar_elev": Value('float32'), |
|
"urban_prop":Value('uint8')} |
|
|
|
|
|
default_input_features = {'S2_bands': ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'], |
|
'S2_dates' : False, 'lat_lon': True, 'GEDI_dates': False, 'ALOS': True, 'CH': True, 'LC': True, |
|
'DEM': True, 'topo': False} |
|
|
|
|
|
s2_bands_idx = {'B01': 0, 'B02': 1, 'B03': 2, 'B04': 3, 'B05': 4, 'B06': 5, 'B07': 6, 'B08': 7, 'B8A': 8, 'B09': 9, 'B11': 10, 'B12': 11} |
|
|
|
|
|
norm_values = { |
|
'ALOS_bands': { |
|
'HH': {'mean': -10.381429, 'std': 8.561741, 'min': -83.0, 'max': 13.329468, 'p1': -83.0, 'p99': -2.1084213}, |
|
'HV': {'mean': -16.722847, 'std': 8.718428, 'min': -83.0, 'max': 11.688309, 'p1': -83.0, 'p99': -7.563843}}, |
|
'S2_bands': |
|
{'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1944}, |
|
'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2214}, |
|
'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556}, |
|
'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816}, |
|
'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.3189}, |
|
'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1632, 'p99': 0.519}, |
|
'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1775, 'p99': 0.6075}, |
|
'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646}, |
|
'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.187, 'p99': 0.6385}, |
|
'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2123, 'p99': 0.6238}, |
|
'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827}, |
|
'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.1149, 'p99': 0.3869}}, |
|
'CH': { |
|
'ch': {'mean': 9.736144, 'std': 9.493601, 'min': 0.0, 'max': 61.0, 'p1': 0.0, 'p99': 38.0}, |
|
'std': {'mean': 7.9882116, 'std': 4.549494, 'min': 0.0, 'max': 254.0, 'p1': 0.0, 'p99': 18.0}}, |
|
'DEM': { |
|
'mean': 604.63727, 'std': 588.02094, 'min': -82.0, 'max': 5205.0, 'p1': 4.0, 'p99': 2297.0}, |
|
'Sentinel_metadata': { |
|
'S2_vegetation_score': {'mean': 89.168724, 'std': 17.17321, 'min': 20.0, 'max': 100.0, 'p1': 29.0, 'p99': 100.0}, |
|
'S2_date': {'mean': 299.1638, 'std': 192.87402, 'min': -165.0, 'max': 623.0, 'p1': -105.0, 'p99': 602.0}}, |
|
'GEDI': { |
|
'agbd': {'mean': 66.97266, 'std': 98.66588, 'min': 0.0, 'max': 499.99985, 'p1': 0.0, 'p99': 429.7605}, |
|
'agbd_se': {'mean': 8.360701, 'std': 4.211524, 'min': 2.981795, 'max': 25.041483, 'p1': 2.9819136, 'p99': 17.13577}, |
|
'rh98': {'mean': 12.074685, 'std': 10.276359, 'min': -1.1200076, 'max': 111.990005, 'p1': 2.3599916, 'p99': 41.96}, |
|
'date': {'mean': 361.7431, 'std': 175.37294, 'min': 0.0, 'max': 624.0, 'p1': 5.0, 'p99': 619.0}} |
|
} |
|
|
|
|
|
NODATAVALS = {'S2_bands' : 0, 'CH': 255, 'ALOS_bands': -9999.0, 'DEM': -9999, 'LC': 255} |
|
|
|
|
|
REF_BIOMES = {20: 'Shrubs', 30: 'Herbaceous vegetation', 40: 'Cultivated', 90: 'Herbaceous wetland', 111: 'Closed-ENL', 112: 'Closed-EBL', 114: 'Closed-DBL', 115: 'Closed-mixed', 116: 'Closed-other', 121: 'Open-ENL', 122: 'Open-EBL', 124: 'Open-DBL', 125: 'Open-mixed', 126: 'Open-other'} |
|
_biome_values_mapping = {v: i for i, v in enumerate(REF_BIOMES.keys())} |
|
_ref_biome_values = [v for v in REF_BIOMES.keys()] |
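
# For reference: _biome_values_mapping assigns consecutive indices to the class codes above in key
# order (20 -> 0, 30 -> 1, 40 -> 2, 90 -> 3, 111 -> 4, ..., 126 -> 13); one_hot() below relies on this.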
|
def normalize_data(data, norm_values, norm_strat, nodata_value = None) : |
|
""" |
|
Normalize the data, according to various strategies: |
|
- mean_std: subtract the mean and divide by the standard deviation |
|
- pct: subtract the 1st percentile and divide by the 99th percentile |
|
- min_max: subtract the minimum and divide by the maximum |
|
|
|
Args: |
|
- data (np.array): the data to normalize |
|
- norm_values (dict): the normalization values |
|
- norm_strat (str): the normalization strategy |
|
|
|
Returns: |
|
- normalized_data (np.array): the normalized data |
|
""" |
|
|
|
if norm_strat == 'mean_std' : |
|
mean, std = norm_values['mean'], norm_values['std'] |
|
if nodata_value is not None : |
|
data = np.where(data == nodata_value, 0, (data - mean) / std) |
|
else : data = (data - mean) / std |
|
|
|
elif norm_strat == 'pct' : |
|
p1, p99 = norm_values['p1'], norm_values['p99'] |
|
if nodata_value is not None : |
|
data = np.where(data == nodata_value, 0, (data - p1) / (p99 - p1)) |
|
else : |
|
data = (data - p1) / (p99 - p1) |
|
data = np.clip(data, 0, 1) |
|
|
|
elif norm_strat == 'min_max' : |
|
min_val, max_val = norm_values['min'], norm_values['max'] |
|
if nodata_value is not None : |
|
data = np.where(data == nodata_value, 0, (data - min_val) / (max_val - min_val)) |
|
else: |
|
data = (data - min_val) / (max_val - min_val) |
|
|
|
else: |
|
raise ValueError(f'Normalization strategy `{norm_strat}` is not valid.') |
|
|
|
return data |
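
# Illustrative example of `normalize_data` (a sketch with made-up pixel values, not dataset data):
#
#   band = np.array([[0.0925, 0.2214, 0.0]], dtype=np.float32)   # last pixel is the S2 nodata value (0)
#   normalize_data(band, norm_values['S2_bands']['B02'], 'pct', NODATAVALS['S2_bands'])
#   # -> approximately [[0.0, 1.0, 0.0]]: p1 maps to 0, p99 to 1, and nodata pixels are zeroed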
|
|
|
|
|
def normalize_bands(bands_data, norm_values, order, norm_strat, nodata_value = None) : |
|
""" |
|
This function normalizes the bands data using the normalization values and strategy. |
|
|
|
Args: |
|
- bands_data (np.array): the bands data to normalize |
|
- norm_values (dict): the normalization values |
|
- order (list): the order of the bands |
|
- norm_strat (str): the normalization strategy |
|
- nodata_value (int/float): the nodata value |
|
|
|
Returns: |
|
- bands_data (np.array): the normalized bands data |
|
""" |
|
|
|
for i, band in enumerate(order) : |
|
band_norm = norm_values[band] |
|
bands_data[i, :, :] = normalize_data(bands_data[i, :, :], band_norm, norm_strat, nodata_value) |
|
|
|
return bands_data |
|
|
|
|
|
def one_hot(x) :
    """ One-hot encode a single land cover class value; classes not in REF_BIOMES map to index 0. """
    encoding = np.zeros(len(_biome_values_mapping))
    encoding[_biome_values_mapping.get(x, 0)] = 1
    return encoding
|
|
|
def encode_biome(lc, encode_strat, embeddings = None) : |
|
""" |
|
This function encodes the land cover data using different strategies: 1) sin/cosine encoding, |
|
2) cat2vec embeddings, 3) one-hot encoding. |
|
|
|
Args: |
|
- lc (np.array): the land cover data |
|
- encode_strat (str): the encoding strategy |
|
- embeddings (dict): the cat2vec embeddings |
|
|
|
Returns: |
|
- encoded_lc (np.array): the encoded land cover data |
|
""" |
|
|
|
if encode_strat == 'sin_cos' : |
|
|
|
lc_cos = np.where(lc == NODATAVALS['LC'], 0, (np.cos(2 * np.pi * lc / 201) + 1) / 2) |
|
lc_sin = np.where(lc == NODATAVALS['LC'], 0, (np.sin(2 * np.pi * lc / 201) + 1) / 2) |
|
return np.stack([lc_cos, lc_sin], axis = -1).astype(np.float32) |
|
|
|
elif encode_strat == 'cat2vec' : |
|
|
|
lc_cat2vec = np.vectorize(lambda x: embeddings.get(x, embeddings.get(0)), signature = '()->(n)')(lc) |
|
return lc_cat2vec.astype(np.float32) |
|
|
|
elif encode_strat == 'onehot' : |
|
lc_onehot = np.vectorize(one_hot, signature = '() -> (n)')(lc).astype(np.float32) |
|
return lc_onehot |
|
|
|
else: raise ValueError(f'Encoding strategy `{encode_strat}` is not valid.') |
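
# Illustrative example of `encode_biome` with one-hot encoding (toy land cover patch, not dataset data):
#
#   lc = np.array([[111, 20], [255, 40]], dtype=np.uint8)   # 255 is the LC nodata value
#   encode_biome(lc, 'onehot').shape                         # -> (2, 2, 14): one channel per class in REF_BIOMES
#   # Class 111 activates index 4 and class 20 index 0; the nodata pixel also falls back to
#   # index 0, since one_hot() maps unknown values there.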
|
|
|
|
|
def compute_num_features(input_features, encode_strat) : |
|
""" |
|
This function computes the number of features that will be used in the model. |
|
|
|
Args: |
|
- input_features (dict): the input features configuration |
|
- encode_strat (str): the encoding strategy |
|
|
|
Returns: |
|
- num_features (int): the number of features |
|
""" |
|
|
|
num_features = len(input_features['S2_bands']) |
|
if input_features['S2_dates'] : num_features += 3 |
|
if input_features['lat_lon'] : num_features += 4 |
|
if input_features['GEDI_dates'] : num_features += 3 |
|
if input_features['ALOS'] : num_features += 2 |
|
if input_features['CH'] : num_features += 2 |
|
if input_features['LC'] : |
|
num_features += 1 |
|
        if encode_strat == 'sin_cos' : num_features += 2
        elif encode_strat == 'cat2vec' : num_features += 5
        elif encode_strat == 'onehot' : num_features += len(REF_BIOMES)
        elif encode_strat == 'none' : num_features += 1 # the raw LC class is kept as an extra channel
|
if input_features['DEM'] : num_features += 1 |
|
if input_features['topo'] : num_features += 3 |
|
|
|
return num_features |
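
# With the module defaults this yields 24 input channels (quick sanity check, not used by the loader):
#
#   compute_num_features(default_input_features, 'sin_cos')
#   # -> 24 = 12 S2 bands + 4 lat/lon + 2 ALOS + 2 CH + (2 + 1) LC channels + 1 DEM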
|
|
|
|
|
def concatenate_features(patch, lc_patch, input_features, encode_strat) : |
|
""" |
|
This function concatenates the features that the user requested. |
|
|
|
Args: |
|
- patch (np.array): the patch data |
|
- lc_patch (np.array): the land cover data |
|
- input_features (dict): the input features configuration |
|
- encode_strat (str): the encoding strategy |
|
|
|
Returns: |
|
- out_patch (np.array): the concatenated features |
|
""" |
|
|
|
|
|
num_features = compute_num_features(input_features, encode_strat) |
|
out_patch = np.zeros((num_features, patch.shape[1], patch.shape[2]), dtype = np.float32) |
|
|
|
|
|
current_idx = 0 |
|
|
|
|
|
s2_indices = [s2_bands_idx[band] for band in input_features['S2_bands']] |
|
out_patch[: current_idx + len(s2_indices)] = patch[s2_indices] |
|
current_idx += len(s2_indices) |
|
|
|
|
|
if input_features['S2_dates'] : |
|
out_patch[current_idx : current_idx + 3] = patch[12:15] |
|
current_idx += 3 |
|
|
|
|
|
if input_features['lat_lon'] : |
|
out_patch[current_idx : current_idx + 4] = patch[15:19] |
|
current_idx += 4 |
|
|
|
|
|
if input_features['GEDI_dates'] : |
|
out_patch[current_idx : current_idx + 3] = patch[19:22] |
|
current_idx += 3 |
|
|
|
|
|
if input_features['ALOS'] : |
|
out_patch[current_idx : current_idx + 2] = patch[22:24] |
|
current_idx += 2 |
|
|
|
|
|
if input_features['CH'] : |
|
out_patch[current_idx] = patch[24] |
|
out_patch[current_idx + 1] = patch[25] |
|
current_idx += 2 |
|
|
|
|
|
if input_features['LC'] : |
|
|
|
|
|
if encode_strat == 'sin_cos' : |
|
out_patch[current_idx : current_idx + 2] = lc_patch |
|
current_idx += 2 |
|
elif encode_strat == 'cat2vec' : |
|
out_patch[current_idx : current_idx + 5] = lc_patch |
|
current_idx += 5 |
|
elif encode_strat == 'onehot' : |
|
out_patch[current_idx : current_idx + len(REF_BIOMES)] = lc_patch |
|
current_idx += len(REF_BIOMES) |
|
elif encode_strat == 'none' : |
|
out_patch[current_idx] = lc_patch |
|
current_idx += 1 |
|
|
|
|
|
out_patch[current_idx] = patch[27] |
|
current_idx += 1 |
|
|
|
|
|
if input_features['topo'] : |
|
out_patch[current_idx : current_idx + 3] = patch[28:31] |
|
current_idx += 3 |
|
|
|
|
|
if input_features['DEM'] : |
|
out_patch[current_idx] = patch[31] |
|
current_idx += 1 |
|
|
|
return out_patch |
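
# Illustrative sketch of `concatenate_features` (toy arrays, not dataset data): with the default
# features and 'sin_cos' land cover encoding, a 32-channel raw patch is reduced to 24 channels.
#
#   toy_patch = np.zeros((32, 25, 25), dtype=np.float32)
#   toy_lc = np.zeros((2, 25, 25), dtype=np.float32)   # sin/cos-encoded land cover
#   concatenate_features(toy_patch, toy_lc, default_input_features, 'sin_cos').shape
#   # -> (24, 25, 25)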
|
class NewDataset(datasets.GeneratorBasedBuilder): |
|
"""DatasetBuilder for AGBD dataset.""" |
|
    def __init__(self, *args, input_features = default_input_features, additional_features = None, norm_strat = 'pct',
                 encode_strat = 'sin_cos', patch_size = 15, **kwargs):
        """
        Args:
        - input_features (dict): which data sources to include in the `input` tensor (see `default_input_features`)
        - additional_features (list): extra metadata fields to expose with each sample (keys of `feature_dtype`)
        - norm_strat (str): normalization strategy, one of 'mean_std', 'pct', 'none'
        - encode_strat (str): land cover encoding, one of 'sin_cos', 'cat2vec', 'onehot', 'none'
        - patch_size (int): side length of the center crop returned for each sample
        """
|
|
|
self.inner_dataset_kwargs = kwargs |
|
self._is_streaming = False |
|
self.patch_size = patch_size |
|
|
|
assert norm_strat in ['mean_std', 'pct', 'none'], f'Normalization strategy `{norm_strat}` is not valid.' |
|
self.norm_strat = norm_strat |
|
|
|
assert encode_strat in ['sin_cos', 'cat2vec', 'onehot', 'none'], f'Encoding strategy `{encode_strat}` is not valid.' |
|
self.encode_strat = encode_strat |
|
|
|
self.input_features = input_features |
|
        self.additional_features = additional_features if additional_features is not None else []
|
|
|
if self.encode_strat == 'cat2vec' : |
|
embeddings = pd.read_csv("embeddings_train.csv") |
|
            embeddings = {v: np.array([a, b, c, d, e]) for v, a, b, c, d, e in zip(embeddings.mapping, embeddings.dim0, embeddings.dim1, embeddings.dim2, embeddings.dim3, embeddings.dim4)}
|
self.embeddings = embeddings |
|
else: self.embeddings = None |
|
|
|
super().__init__(*args, **kwargs) |
|
|
|
VERSION = datasets.Version("1.1.0") |
|
|
|
BUILDER_CONFIGS = [ |
|
datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"), |
|
datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"), |
|
] |
|
|
|
DEFAULT_CONFIG_NAME = "default" |
|
|
|
def as_streaming_dataset(self, split=None, base_path=None): |
|
self._is_streaming = True |
|
return super().as_streaming_dataset(split=split, base_path=base_path) |
|
|
|
def _info(self): |
|
|
|
all_features = { |
|
'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))), |
|
'label': Value('float32') |
|
} |
|
for feat in self.additional_features: |
|
all_features[feat] = feature_dtype[feat] |
|
features = datasets.Features(all_features) |
|
|
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
license=_LICENSE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming) |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train"}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split": "validation"}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test"}), |
|
] |
|
|
|
def _generate_examples(self, split): |
|
for i, d in enumerate(self.original_dataset[split]): |
|
|
|
patch = np.asarray(d["input"]) |

            # Unless normalization is disabled, standardize each data source using the
            # dataset-wide statistics in `norm_values`.
if self.norm_strat != 'none' : |
                # Sentinel-2 reflectance bands (B01-B12)
patch[:12] = normalize_bands(patch[:12], norm_values['S2_bands'], ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'], self.norm_strat, NODATAVALS['S2_bands']) |

                # Sentinel-2 acquisition date offset; 'pct' falls back to min-max scaling for dates
patch[12] = normalize_data(patch[12], norm_values['Sentinel_metadata']['S2_date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat) |

                # GEDI acquisition date offset
patch[19] = normalize_data(patch[19], norm_values['GEDI']['date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat) |

                # ALOS PALSAR backscatter bands (HH, HV)
patch[22:24] = normalize_bands(patch[22:24], norm_values['ALOS_bands'], ['HH', 'HV'], self.norm_strat, NODATAVALS['ALOS_bands']) |

                # Canopy height map and its per-pixel standard deviation
patch[24] = normalize_data(patch[24], norm_values['CH']['ch'], self.norm_strat, NODATAVALS['CH']) |
|
patch[25] = normalize_data(patch[25], norm_values['CH']['std'], self.norm_strat, NODATAVALS['CH']) |

                # Digital elevation model
patch[31] = normalize_data(patch[31], norm_values['DEM'], self.norm_strat, NODATAVALS['DEM']) |

            # Encode the land cover classes
if self.encode_strat != 'none' : lc_patch = encode_biome(patch[26], self.encode_strat, self.embeddings).swapaxes(-1,0) |
|
else: lc_patch = patch[26] |

            # Rescale the land cover probability layer from percent to [0, 1]
patch[27] = patch[27] / 100 |

            # Assemble the requested input channels into a single (C, H, W) array
out_patch = concatenate_features(patch, lc_patch, self.input_features, self.encode_strat) |

            # Center-crop to the requested patch size
start_x = (patch.shape[1] - self.patch_size) // 2 |
|
start_y = (patch.shape[2] - self.patch_size) // 2 |
|
out_patch = out_patch[:, start_x : start_x + self.patch_size, start_y : start_y + self.patch_size] |

            # Pack the cropped patch with its AGB label
data = {'input': out_patch, 'label': d["label"]} |

            # Attach any requested metadata fields
for feat in self.additional_features: |
|
data[feat] = d["metadata"][feat] |
|
|
|
yield i, data |
|
|
|
|
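# Minimal usage sketch (not part of the loader): stream one training sample through this builder.
# The values below are simply the module defaults ('pct' normalization, 'sin_cos' encoding, 15x15
# crops); streaming pulls the raw patches from prs-eth/AGBD_raw on the fly.
if __name__ == "__main__":
    builder = NewDataset(norm_strat='pct', encode_strat='sin_cos', patch_size=15)
    sample = next(iter(builder.as_streaming_dataset(split='train')))
    # With the default input features this prints (24, 15, 15) and the sample's AGB label.
    print(np.asarray(sample['input']).shape, sample['label'])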