repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
fl-analysis
|
fl-analysis-master/src/util.py
|
import collections.abc
from copy import deepcopy
import numpy as np
import pandas as pd
from os.path import join
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.core import Dense
def log_data(experiment_dir, rounds, accuracy, adv_success):
"""Logs data."""
df = pd.DataFrame()
df['round'] = rounds
df['accuracy'] = accuracy
df['adv_success'] = adv_success
df.to_csv(join(experiment_dir, 'log.csv'), index=False)
def power_iteration(A):
"""Computes principle eigenvalue and eigenvector.
Args:
A (np.ndarray): Square matrix.
Returns:
tuple: Tuple of eigenvalue and eigenvector of np.ndarray type.
"""
def eigenvalue(A, v):
Av = A.dot(v)
return v.dot(Av)
n, d = A.shape
v = np.ones(d) / np.sqrt(d)
ev = eigenvalue(A, v)
while True:
Av = A.dot(v)
v_new = Av / np.linalg.norm(Av)
ev_new = eigenvalue(A, v_new)
if np.abs(ev - ev_new) < 0.01:
break
v = v_new
ev = ev_new
return ev_new, v_new
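# Hedged usage sketch: for a small symmetric matrix the loop above converges to
# the dominant eigenpair (stopping once successive eigenvalue estimates differ
# by less than 0.01), e.g.
#   ev, v = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]))
# yields ev close to 2 and v close to [1, 0] (up to sign).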
def create_dropout_mask(model, federated_dropout_rate, federated_dropout_all_parameters, n_clients=1):
"""Applies dropout on model parameters as described in:
Caldas, S., Konečny, J., McMahan, H.B. and Talwalkar, A., 2018. Expanding the Reach of Federated Learning by
Reducing Client Resource Requirements. arXiv preprint arXiv:1812.07210
A fixed number of neurons in Dense layers (and filters in Conv2D layers) is zeroed out, except for the
very first and last layers (unless federated_dropout_all_parameters is True). Biases are left intact.
Args:
model (tf.model): Keras model.
federated_dropout_rate (float): Federated dropout rate in (0, 1) range.
federated_dropout_all_parameters (bool): Program parameter.
n_clients (int): How many non-overlapping dropout masks to create.
Returns:
list: For each client, a list of np.ndarray representing that client's dropout mask.
"""
assert 0 < federated_dropout_rate < 1., 'Federated dropout rate must be in (0, 1) range.'
assert type(model.layers[0]) in [Conv2D, Dense], \
"The implementation assumes that the first layer is Dense or Conv2D"
layer_range = 1, len(model.layers) - 1
if federated_dropout_all_parameters:
layer_range = 0, len(model.layers)
dropout_mask = [[np.ones_like(l, dtype=bool) for l in model.get_weights()] for _ in range(n_clients)]
# elems_to_drop = 1.0 - federated_dropout_rate
layer_ind = layer_range[0] * 2 # since we skip the first layer
for ind in range(layer_range[0], layer_range[1]):
if type(model.layers[ind]) in [Conv2D, Dense]:
param_shape = model.layers[ind].weights[0].shape
if federated_dropout_all_parameters: # partially zeroed out filters
assert n_clients * federated_dropout_rate < 1
# param_shape = (kernel w, kernel h, prev layer filters, current layer filters)
total_params = np.prod(param_shape)
n_select = int(federated_dropout_rate * total_params) * n_clients
keep_inds = np.random.choice(total_params, n_select, replace=False)
keep_inds = keep_inds.reshape((n_clients, -1))
for client in range(n_clients):
layer_mask = np.zeros(np.prod(param_shape), dtype=bool)
layer_mask[keep_inds[client]] = True
dropout_mask[client][layer_ind] = layer_mask.reshape(param_shape)
else:
n_select = int(federated_dropout_rate * param_shape[-1]) * n_clients
keep_inds = np.random.choice(param_shape[-1], n_select, replace=True)
keep_inds = keep_inds.reshape((n_clients, -1))
for client in range(n_clients):
layer_mask = np.zeros_like(dropout_mask[client][layer_ind], dtype=bool)
layer_mask[..., keep_inds[client]] = True
dropout_mask[client][layer_ind] = layer_mask
layer_ind += 2 # step by 2 because we zero out only weights (biases are skipped)
return dropout_mask
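# Hedged usage sketch (assumes a Keras `model` whose first layer is Dense or
# Conv2D, as asserted above): build one boolean mask per client and apply it to
# the weights before sending them to that client, e.g.
#   masks = create_dropout_mask(model, 0.25, False, n_clients=4)
#   client0_weights = [w * m for w, m in zip(model.get_weights(), masks[0])]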
def aggregate_weights_masked(current_weights, global_learning_rate, num_clients, dropout_rate, client_dropout_mask, client_weight_list):
"""Procedure for merging client weights together with `global_learning_rate`."""
assert len(current_weights) == len(client_weight_list[0])
assert len(client_dropout_mask) == len(client_weight_list)
assert len(client_dropout_mask[0]) == len(client_weight_list[0])
new_weights = deepcopy(current_weights)
number_of_clients_participating_this_round = len(client_dropout_mask)
# Estimate impact of this update
update_coefficient = global_learning_rate / num_clients
client_weight_list_masked = []
for mask, w in zip(client_dropout_mask, client_weight_list):
client = []
for mask_l, w_l, old_w_l in zip(mask, w, current_weights):
update = w_l - old_w_l
update[mask_l == False] = float('nan')
client.append(update)
client_weight_list_masked.append(client)
client_weight_list_t = [list(i) for i in zip(*client_weight_list_masked)]
update_weight_list = [np.nan_to_num(np.nansum(w, axis=0)) for w in client_weight_list_t]
counts = [np.sum(np.array(list(i), dtype=int), axis=0) for i in zip(*client_dropout_mask)]
update_weight_list = [update_coefficient * w for w, c in zip(update_weight_list, counts)]
for layer in range(len(current_weights)):
new_weights[layer] = new_weights[layer] + \
update_weight_list[layer]
return new_weights
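# In effect each parameter is updated as
#   w_new = w_old + (global_learning_rate / num_clients) * sum_over_clients(w_client - w_old),
# where positions a client dropped out (mask == False) are set to NaN and thus
# contribute nothing to the nansum above.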
def flatten(d, parent_key='', sep='.'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
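# Example: flatten({'a': {'b': 1}, 'c': 2}) returns {'a.b': 1, 'c': 2}.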
| 6,070 | 38.679739 | 136 |
py
|
fl-analysis
|
fl-analysis-master/src/test_tf_model.py
|
from unittest import TestCase
from src.tf_model import Model
from src.tf_data import Dataset
from matplotlib import pyplot
import tensorflow as tf
import numpy as np
class TestModel(TestCase):
def test_create_model_weight(self):
model = Model.create_model("dev")
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(128)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
initial_weights = model.get_weights()
bins = np.linspace(-0.001, 0.001, 100)
stddevs = []
for i in range(10):
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
update = np.concatenate([np.reshape(initial_weights[i] - model.get_weights()[i], [-1]) for i in range(len(initial_weights))])
print(np.std(update))
stddevs.append(np.std(update))
# pyplot.hist(update, bins, alpha=1.0, label=f'Iteration {i+1}')
pyplot.plot(range(1, 11), stddevs, 'bo')
pyplot.legend(loc='upper right')
pyplot.show()
def test_create_model_weight_multbatches(self):
model = Model.create_model("dev")
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(12800)
batch_size = 128
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
initial_weights = model.get_weights()
bins = np.linspace(-0.001, 0.001, 100)
stddevs = []
xs = []
total_batches = int(x_train.shape[0] / batch_size)
for i in range(5):
for bid in range(total_batches):
batch_x = x_train[bid * batch_size:(bid + 1) * batch_size]
batch_y = y_train[bid * batch_size:(bid + 1) * batch_size]
with tf.GradientTape() as tape:
predictions = model(batch_x, training=True)
loss_value = loss_object(y_true=batch_y, y_pred=predictions)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
update = np.concatenate(
[np.reshape(initial_weights[i] - model.get_weights()[i], [-1]) for i in range(len(initial_weights))])
print(np.std(update))
stddevs.append(np.std(update))
xs.append(i + (bid / float(total_batches)))
# pyplot.hist(update, bins, alpha=1.0, label=f'Iteration {i+1}')
pyplot.plot(xs, stddevs)
pyplot.legend(loc='upper right')
pyplot.show()
| 3,013 | 36.209877 | 137 |
py
|
fl-analysis
|
fl-analysis-master/src/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/config_old.py
|
import sys
import configargparse
import logging
from src.client_attacks import Attack
parser = configargparse.ArgumentParser()
parser.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')
# logging configuration
parser.add_argument(
'-d', '--debug',
help="Print debug statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Print verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
# client configuration
parser.add_argument('--num_clients', type=int, default=3, help='Total number of clients.')
parser.add_argument('--num_selected_clients', type=int, default=-1,
help='The number of selected clients per round; -1 to use all clients.')
parser.add_argument('--num_malicious_clients', type=int, default=0, help='Total number of malicious clients.')
parser.add_argument('--augment_data', type=str, default='false',
help='Whether to augment train/aux datasets',
choices=['true', 'false'])
# attacks
parser.add_argument('--attacks_config', type=str, default=None, help='Path to attack config.')
parser.add_argument('--attack_type', type=str, default='untargeted', help='Attack type.',
choices=['untargeted', 'backdoor', Attack.DEVIATE_MAX_NORM.value])
parser.add_argument('--estimate_other_updates', type=str, default='false',
help='Whether to estimate the update of the others.',
choices=['true', 'false'])
parser.add_argument('--attack_after', type=int, default=0, help='After which round to start behaving maliciously.')
parser.add_argument('--attack_stop_after', type=int, default=10000000, help='After which round to stop behaving maliciously.')
parser.add_argument('--attack_frequency', type=float, default=None, help='Frequency of malicious parties being selected. Default is None, for random selection')
parser.add_argument('--weight_regularization_alpha', type=float, default=[1], nargs='+',
help='Alpha value for weight regularization. Keep one for none.')
parser.add_argument('--attacker_full_dataset', type=str, default='false',
help='Whether the attack can access the full dataset',
choices=['true', 'false'])
parser.add_argument('--attacker_full_knowledge', type=str, default='false',
help='Whether the attacker has access to the benign updates in a specific round',
choices=['true', 'false'])
parser.add_argument('--permute_dataset', type=int, nargs='+', default=[], help='Use with caution. Run many attacks while permuting items in this list')
# attacks - untargeted
parser.add_argument('--untargeted_after_training', type=str, default='false',
help='Whether local model gradients are flipped in each local training iteration or when the local model is fully trained.',
choices=['true', 'false'])
# attacks - targeted_deterministic_attack
parser.add_argument('--targeted_deterministic_attack_objective', type=int, default=3,
help="All malicious clients try to make the model misclassify a given input as this predefined objective. Only Applicable if num_malicious_clients is non-zero value and 'attack_type' is 'targeted_deterministic'.")
# attacks - targeted
parser.add_argument('--targeted_attack_objective', type=int, default=[5, 7], nargs='+',
help="Malicious clients try to make the model classify every sample of a class (first arguments) as a target (second argument). Only applicable if num_malicious_clients is non-zero value and 'attack_type' is 'targeted'.")
parser.add_argument('--targeted_attack_benign_first', type=str, default='false', choices=['true', 'false'],
help="If set to true, the attack would perform benign training first and fine tune updates on malicious dataset. Applicable if attack_type is 'targeted'.")
# attacks - min loss
parser.add_argument('--aux_samples', type=int, default=-1,
help="Size of auxiliary dataset that is used for backdoor attack.")
parser.add_argument('--gaussian_noise', type=float, default=0,
help="Sigma value for gaussian noise that is added to aux samples if the value is > 0.")
parser.add_argument('--backdoor_type', type=str, default='semantic', help='Backdoor type. Semantic = backdoor_feature_*, tasks = Sun et al., edge = edge cases',
choices=['semantic', 'tasks', 'edge'])
parser.add_argument('--backdoor_stealth', type=str, default='false', help='Whether to use stealth in backdoor.',
choices=['true', 'false'])
parser.add_argument('--backdoor_attack_objective', type=int, default=[7, 1], nargs='+',
help="What class to mispredict `aux_samples` times. Only applicable if num_malicious_clients is non-zero value and 'attack_type' is 'segment_poisoning'.")
parser.add_argument('--backdoor_tasks', type=int, default=1,
help="Number of backdoor tasks to fill")
parser.add_argument('--mal_num_epochs_max', type=int, default=100, help="Maximum number of epochs to run the attack")
parser.add_argument('--mal_target_loss', type=float, default=0.1, help="Target threshold for training")
# attacks - edge case
parser.add_argument('--edge_case_type', type=str, default=None, help='Which edge case class to use')
# attacks - data poisoning
parser.add_argument('--poison_samples', type=int, default=1,
help="How many samples to poison in a batch")
parser.add_argument('--mal_num_batch', type=int, default=[200], nargs='+',
help="How many batches to run")
# attack - backdoor feature
parser.add_argument('--backdoor_feature_aux_train', type=int, default=[], nargs='+',
help="What samples to use as aux train set. Only applicable 'attack_type' is 'segment_poisoning' or 'model_replacement'.")
parser.add_argument('--backdoor_feature_aux_test', type=int, default=[], nargs='+',
help="What samples to use as aux test set. Only applicable 'attack_type' is 'segment_poisoning' or 'model_replacement'.")
parser.add_argument('--backdoor_feature_target', type=int, default=2,
help="Malicious target label")
parser.add_argument('--backdoor_feature_benign_regular', type=int, default=[], nargs='+',
help="Include specific benign samples in training from the dataset")
parser.add_argument('--backdoor_feature_remove_malicious', type=str, default='false', help='Whether to remove the malicious samples from the honest clients.',
choices=['true', 'false'])
parser.add_argument('--backdoor_feature_augment_times', type=int, default=0, help="How many times the eval samples should be augmented. Leave 0 for no augmentation")
# attack - backdoor contamination model
parser.add_argument('--contamination_model', action='store_true', default=False,
help='Whether attackers modify only a subset of neurons')
parser.add_argument('--contamination_rate', type=float, default=[None], nargs='+',
help='Percentage of neurons (filters) per layer that is modified by adversaries.'
' If only one value is specified, then the same contamination rate is used for all '
'convolutional and dense layers.')
# attacks - PGD
parser.add_argument('--pgd', type=str, default=None, choices=['l2', 'l_inf'],
help='(Projected Gradient Descent)'
'Whether malicious clients project their gradients onto the feasible set. '
'Compatible with all implemented attacks.')
parser.add_argument('--pgd_constraint', type=float, default=None,
help='Projection bound (applicable only if `pgd` is set).')
parser.add_argument('--pgd_clip_frequency', type=int, default=1,
help='Clip every x steps of SGD. Defaults to 1 (after every step).')
parser.add_argument('--pgd_adaptive', type=str, default="false", help="Whether to be adaptive in the gradient clipping (not sure if working).")
# attacks - boosting supplement
parser.add_argument('--scale_attack', type=str, default="false", help="Whether malicious clients scale their updates.")
parser.add_argument('--scale_attack_weight', type=float, default=[1.0], nargs='+',
help="A scaling factor for malicious clients' updates. Only applicable if scale_attack is set to true.")
# defense
parser.add_argument("--clip", type=float, default=None, help="A positive float value for absolute update clipping.")
parser.add_argument("--clip_l2", type=float, default=None, help="A positive float value for l2 update clipping.")
parser.add_argument("--clip_probability", type=float, default=1.0, help="Percentage of weights to clip")
parser.add_argument("--clip_layers", type=int, default=[], nargs='+', help="Indexes of layers to clip. Leave empty for all")
# data configuration
parser.add_argument("--data_distribution", type=str, default='IID', help="IID or non-IID.")
parser.add_argument("--number_of_samples", type=int, default=-1,
help="How many samples to use for training; default value of -1 indicates to use the full dataset.")
parser.add_argument("--dataset", type=str, default='mnist', help="Which dataset to use.", choices=['mnist', 'femnist', 'fmnist', 'cifar10'])
# training configuration
parser.add_argument("--model_name", type=str, default='dev', help="Which model to use.",
choices=['dev', 'mnist_cnn', 'bhagoji', 'resnet18', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet18_v2', 'resnet56_v2', 'dev_intrinsic', 'dev_fc_intrinsic', 'bhagoji_intrinsic', 'mnistcnn_intrinsic', 'lenet5_mnist', 'lenet5_cifar', 'lenet5_intrinsic', 'allcnn', 'allcnn_intrinsic'])
parser.add_argument("--num_rounds", type=int, default=40, help="Number of training rounds.")
parser.add_argument("--num_epochs", type=int, default=3, help="Number of client epochs.")
parser.add_argument("--num_test_batches", type=int, default=-1, help="Number of test batches to evaluate. -1 for max.")
parser.add_argument("--batch_size", type=int, default=128, help="Clients' batch size.")
parser.add_argument('--optimizer', type=str, default='Adam', help='Which optimizer to use.', choices=['Adam', 'SGD'])
parser.add_argument('--learning_rate', type=float, default=0.0001, nargs="+", help='Learning rate for selected optimizer.')
parser.add_argument('--lr_decay', type=str, default='None', help='Apply decay to the learning rate.',
choices=['None', 'exponential', 'boundaries'])
parser.add_argument('--decay_steps', type=float, default=None, help='Decay steps for exponential decay.')
parser.add_argument('--decay_rate', type=float, default=None, help='Decay rate for exponential decay.')
parser.add_argument('--decay_boundaries', type=int, default=[], nargs="+", help='Boundaries for boundaries decay mode')
parser.add_argument('--decay_values', type=float, default=[], nargs="+", help='Values for boundaries decay mode')
parser.add_argument('--regularization_rate', type=float, default=None, help='Weight regularization rate.')
parser.add_argument('--mal_learning_rate', type=float, default=[], nargs="+", help='Malicious learning rate for selected optimizer.')
parser.add_argument('--mal_decay_steps', type=float, default=None, help='Malicious decay steps for exponential decay.')
parser.add_argument('--mal_decay_rate', type=float, default=None, help='Malicious decay rate for exponential decay.')
parser.add_argument('--mal_num_epochs', type=int, default=None, help='How many malicious epochs to run')
parser.add_argument('--mal_step_learning_rate', type=str, default='false', help='Whether to step the learning rate.',
choices=['true', 'false'])
parser.add_argument('--federated_dropout_rate', type=float, default=1.0,
help='Percentage of neurons (or filters for convolutional layers) that are kept on each layer.')
parser.add_argument('--federated_dropout_all_parameters', action='store_true', default=False,
help='If set to True, applies dropout on all parameters randomly according to the dropout rate. '
'Applicable only if federated_dropout_rate < 1.0.')
parser.add_argument('--federated_dropout_nonoverlap', action='store_true', default=False,
help="Each client receives a unique mask that is not overlapped with other clients' masks."
'Applicable only if federated_dropout_rate < 1.0.')
parser.add_argument('--federated_dropout_randommask', type=str, default='false',
help="Enable low rank mode instead of federated dropout, i.e. only mask the uplink.")
parser.add_argument('--global_gaussian_noise', type=float, default=0.0,
help='Gaussian noise to add to the global model for the server.')
parser.add_argument('--global_learning_rate', type=float, default=-1, help='Global learning rate for the server.')
parser.add_argument("--aggregator", type=str, default='FedAvg', help="Aggregator type. Supported: FedAvg, TrimmedMean")
parser.add_argument('--trimmed_mean_beta', type=float, default=0.1, help='Beta value of trimmed mean. 0 < beta < 1/2.')
parser.add_argument("--intrinsic_dimension", type=int, default=1000, help="Size of intrinsic dimension. Only applicable if using subspace machine learning model.")
parser.add_argument("--load_model", type=str, default=None, help="Path to load an existing model to initialize the setup.")
parser.add_argument('--ignore_malicious_update', type=str, default="false", help="Whether to ignore malicious updates in training.")
parser.add_argument('--quantization', type=str, default=None, help='Whether to use (probabilistic) quantization', choices=['deterministic', 'probabilistic', 'd', 'p'])
parser.add_argument('--q_bits', type=int, default=None, help='Number of bits of the fixed-point number to represent the weights for quantization')
parser.add_argument('--q_frac', type=int, default=None, help='Number of fractional bits of the fixed-point number for quantization')
# logging
parser.add_argument("--experiment_name", type=str, default='tmp', help="Sub-directory where the log files are stored.")
parser.add_argument("--print_every", type=int, default=1,
help="After how many rounds to perform and log evaluation on test set.")
parser.add_argument("--save_updates", type=str, default='true', help="Whether to save the weight updates. Disable for large models / large number of clients.",
choices=['true', 'false'])
parser.add_argument("--save_norms", type=str, default='false', help="Whether to save the norms for all clients",
choices=['true', 'false'])
parser.add_argument("--save_weight_distributions", type=str, default='false', help="Whether to save the weight distributions for all clients",
choices=['true', 'false'])
parser.add_argument("--keep_history", action='store_true', default=False,
help='Whether Server keeps parameter history.'
' Warning: It slows down the training because of principal eigenvalue computation.')
parser.add_argument("--save_model_at", type=int, default=[], nargs='+', help="At what rounds to save model.")
# hyperparameter tuning
parser.add_argument("--hyperparameter_tuning", type=str, default='false', help="Whether to use hyperparameter tuning", choices=['true', 'false'])
parser.add_argument("--tune_attack_clients", type=int, nargs='+', default=[-1], help="Helper for hyperparameter tuning to set the number of clients + scale_attack_weight")
parser.add_argument("--tune_attack_clients_selected_frac", type=float, default=None, help="Fraction of clients to be selected")
parser.add_argument("--hyperparameters_tuned", type=str, nargs='+', default=[], help="Which hyperparams are being tuned at the moment")
# experiment reproducibility
parser.add_argument("--seed", type=int, default=0,
help="Seed for random functions. Ensures experiment reproducibility.")
# computational optimization
parser.add_argument("--workers", type=int, default=1, help="How many threads to use for client training simulation.")
parser.add_argument("--optimized_training", type=str, default='true', help="Use optimized training loop where possible.", choices=['true', 'false'])
def get_config():
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# handler = logging.StreamHandler(sys.stdout)
# handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# root.addHandler(handler)
config = dict()
config['num_clients'] = args.num_clients
if args.num_selected_clients == -1:
config['num_selected_clients'] = args.num_clients
else:
config['num_selected_clients'] = args.num_selected_clients
config['num_malicious_clients'] = args.num_malicious_clients
config['augment_data'] = True if args.augment_data.lower() == "true" else False
config['weight_regularization_alpha'] = args.weight_regularization_alpha[0]
config['attack_type'] = args.attack_type
config['untargeted_after_training'] = True if args.untargeted_after_training.lower() == "true" else False
config['targeted_deterministic_attack_objective'] = args.targeted_deterministic_attack_objective
config['targeted_attack_objective'] = tuple(args.targeted_attack_objective)
config['targeted_attack_benign_first'] = True if args.targeted_attack_benign_first.lower() == 'true' else False
config['scale_attack'] = True if args.scale_attack.lower() == "true" else False
config['scale_attack_weight'] = args.scale_attack_weight[0]
config['data_distribution'] = args.data_distribution
config['estimate_other_updates'] = True if args.estimate_other_updates.lower() == "true" else False
config['num_rounds'] = args.num_rounds
config['num_epochs'] = args.num_epochs
config['mal_num_epochs'] = args.mal_num_epochs if args.mal_num_epochs is not None else args.num_epochs
config['batch_size'] = args.batch_size
config['num_test_batches'] = args.num_test_batches if args.num_test_batches > -1 else sys.maxsize
config['optimizer'] = args.optimizer
config['learning_rate'] = args.learning_rate[0] if isinstance(args.learning_rate, list) and len(args.learning_rate) > 0 else args.learning_rate
config['lr_decay'] = args.lr_decay if args.lr_decay != 'None' else None
config['decay_steps'] = args.decay_steps
config['decay_rate'] = args.decay_rate
config['decay_boundaries'] = args.decay_boundaries
config['decay_values'] = args.decay_values
config['regularization_rate'] = args.regularization_rate
config['mal_learning_rate'] = args.mal_learning_rate[0] if len(args.mal_learning_rate) > 0 else config['learning_rate']
config['mal_decay_steps'] = args.mal_decay_steps if args.mal_decay_steps is not None else args.decay_steps
config['mal_decay_rate'] = args.mal_decay_rate if args.mal_decay_rate is not None else args.decay_rate
config['mal_step_learning_rate'] = True if args.mal_step_learning_rate.lower() == "true" else False
config['aggregator'] = args.aggregator
config['trimmed_mean_beta'] = args.trimmed_mean_beta
config['global_learning_rate'] = args.global_learning_rate
config['global_gaussian_noise'] = args.global_gaussian_noise
config['federated_dropout_rate'] = args.federated_dropout_rate
config['federated_dropout_all_parameters'] = args.federated_dropout_all_parameters
config['federated_dropout_nonoverlap'] = args.federated_dropout_nonoverlap
config['federated_dropout_randommask'] = True if args.federated_dropout_randommask.lower() == "true" else False
config['intrinsic_dimension'] = args.intrinsic_dimension
config['ignore_malicious_update'] = True if args.ignore_malicious_update.lower() == "true" else False
config['quantization'] = args.quantization
if config['quantization'] == 'p':
config['quantization'] = 'probabilistic'
elif config['quantization'] == 'd':
config['quantization'] = 'deterministic'
config['q_bits'] = args.q_bits
config['q_frac'] = args.q_frac
assert 0 < args.federated_dropout_rate <= 1, 'Federated dropout rate must be in (0, 1] range.'
config['experiment_name'] = args.experiment_name
config['print_every'] = args.print_every
config['save_updates'] = True if args.save_updates.lower() == 'true' else False
config['keep_history'] = args.keep_history
config['save_model_at'] = args.save_model_at
config['load_model'] = args.load_model
config['save_norms'] = True if args.save_norms.lower() == 'true' else False
config['save_weight_distributions'] = True if args.save_weight_distributions.lower() == 'true' else False
config['model_name'] = args.model_name
if args.clip is not None and args.clip != 0:
assert args.clip > 0, '`clip` parameter must be a positive float.'
config['clip'] = args.clip if args.clip is not None and args.clip != 0 else None
config['clip_probability'] = args.clip_probability
config['clip_l2'] = args.clip_l2
config['clip_layers'] = args.clip_layers
config['dataset'] = args.dataset
config['workers'] = args.workers
config['number_of_samples'] = args.number_of_samples
config['aux_samples'] = args.aux_samples if args.aux_samples != -1 else sys.maxsize
config['mal_num_epochs_max'] = args.mal_num_epochs_max
config['mal_target_loss'] = args.mal_target_loss
config['backdoor_type'] = args.backdoor_type
config['backdoor_stealth'] = True if args.backdoor_stealth.lower() == 'true' else False
config['backdoor_attack_objective'] = None if args.backdoor_attack_objective[0] == -1 else tuple(args.backdoor_attack_objective)
config['edge_case_type'] = args.edge_case_type
config['attack_after'] = args.attack_after
config['attack_stop_after'] = args.attack_stop_after
config['attack_frequency'] = args.attack_frequency if args.attack_frequency != -1 else None
config['attacker_full_dataset'] = True if args.attacker_full_dataset.lower() == "true" else False
config['attacker_full_knowledge'] = True if args.attacker_full_knowledge.lower() == "true" else False
config['backdoor_tasks'] = args.backdoor_tasks if args.num_malicious_clients > 0 else 0
config['backdoor_feature_aux_train'] = args.backdoor_feature_aux_train
config['backdoor_feature_aux_test'] = args.backdoor_feature_aux_test
config['backdoor_feature_target'] = args.backdoor_feature_target
config['backdoor_feature_benign_regular'] = args.backdoor_feature_benign_regular
config['backdoor_feature_remove_malicious'] = True if args.backdoor_feature_remove_malicious.lower() == "true" else False
config['backdoor_feature_augment_times'] = args.backdoor_feature_augment_times
config['poison_samples'] = args.poison_samples
config['mal_num_batch'] = args.mal_num_batch[0]
config['optimized_training'] = True if args.optimized_training.lower() == "true" else False
assert args.gaussian_noise >= 0.
config['gaussian_noise'] = args.gaussian_noise
config['contamination_model'] = args.contamination_model
config['contamination_rate'] = _preprocess_contamination_rate(args)
if args.pgd is not None:
assert args.pgd_constraint is not None, "PGD constraint value must be set."
config['pgd'] = args.pgd
config['pgd_constraint'] = args.pgd_constraint
config['pgd_clip_frequency'] = args.pgd_clip_frequency
config['pgd_adaptive'] = True if args.pgd_adaptive.lower() == 'true' else False
logging.info(config)
logging.warning("Can I see this?")
return config, args
def _preprocess_contamination_rate(args):
if not args.contamination_model:
return args.contamination_rate
assert args.contamination_rate[0] is not None, "Contamination rate must be specified."
from src.tf_model import Model
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.core import Dense
model = Model.create_model(args.model_name)
n_layers = len([1 for layer in model.layers if type(layer) in [Conv2D, Dense]])
if len(args.contamination_rate) == 1:
return tuple(args.contamination_rate * n_layers)
assert len(args.contamination_rate) == n_layers, f"The number of specified values does not align with the number " \
f"of layers ({len(args.contamination_rate)} != {n_layers})"
return tuple(args.contamination_rate)
| 24,879 | 63.455959 | 313 |
py
|
fl-analysis
|
fl-analysis-master/src/hyperparameter_tuning.py
|
import os
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
import numpy as np
from src.federated_averaging import FederatedAveraging
from src.tf_model import Model
def load_model(args, config):
if args.load_model is not None:
model = tf.keras.models.load_model(args.load_model) # Load with weights
else:
model = Model.create_model(args.model_name, config['intrinsic_dimension'], config['regularization_rate'])
return model
def tune_hyper(args, config):
learning_rate = args.learning_rate if isinstance(args.learning_rate, list) and len(args.learning_rate) > 0 else [args.learning_rate]
HP_LR = hp.HParam('learning_rate', hp.Discrete(learning_rate))
HP_MAL_NUM_BATCH = hp.HParam('mal_num_batch', hp.Discrete(args.mal_num_batch))
mal_lr = args.mal_learning_rate if isinstance(args.mal_learning_rate, list) and len(args.mal_learning_rate) > 0 else [args.learning_rate]
HP_MAL_LR = hp.HParam('mal_learning_rate', hp.Discrete(mal_lr))
HP_WEIGHT_REG = hp.HParam('weight_regularization_alpha', hp.Discrete(args.weight_regularization_alpha))
HP_WEIGHT_SCALE = hp.HParam('scale_attack_weight', hp.Discrete(args.scale_attack_weight))
# NUM_ClIENTS = hp.HParam('mal_learning_rate', hp.Discrete(args.mal_learning_rate))
HP_NUM_CLIENTS_SETUP = hp.HParam('num_clients_attack', hp.Discrete(args.tune_attack_clients))
METRIC_ACCURACY = 'evaluation/test_accuracy'
METRIC_ADV_SUCCESS = 'evaluation/adv_success'
experiment_root_dir = os.path.join(os.getcwd(), 'experiments')
experiment_dir = os.path.join(experiment_root_dir, args.experiment_name)
with tf.summary.create_file_writer(experiment_dir).as_default():
hp.hparams_config(
hparams=[HP_LR, HP_MAL_NUM_BATCH, HP_MAL_LR, HP_WEIGHT_REG, HP_WEIGHT_SCALE, HP_NUM_CLIENTS_SETUP],
metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy'),
hp.Metric(METRIC_ADV_SUCCESS, display_name='Adversarial Success')],
)
session_num = 0
for lr in HP_LR.domain.values:
for mal_lr in HP_MAL_LR.domain.values:
for mal_num_batch in HP_MAL_NUM_BATCH.domain.values:
for wr in HP_WEIGHT_REG.domain.values:
for scale in HP_WEIGHT_SCALE.domain.values:
for num_clients_att in HP_NUM_CLIENTS_SETUP.domain.values:
hparams_dict = {
HP_MAL_NUM_BATCH.name: mal_num_batch,
HP_MAL_LR.name: mal_lr,
HP_WEIGHT_REG.name: wr,
HP_WEIGHT_SCALE.name: scale,
HP_NUM_CLIENTS_SETUP.name: num_clients_att,
HP_LR.name: lr
}
config_run = config
config_run["learning_rate"] = lr
config_run["mal_num_batch"] = mal_num_batch
config_run["mal_learning_rate"] = mal_lr
config_run["weight_regularization_alpha"] = wr
if num_clients_att != -1:
# glob_lr = args.global_learning_rate if args.global_learning_rate == -1
selected = int(num_clients_att * args.tune_attack_clients_selected_frac)
config_run["num_selected_clients"] = selected
config_run["num_clients"] = num_clients_att
config_run["scale_attack_weight"] = num_clients_att / args.global_learning_rate # assumes nom. learning_rate
# TODO: Autocalc global lr for full scale
# if args.global_learning_rate == -1:
# config_run["scale_attack_weight"] = num_clients_att / selected
# else:
# config_run["scale_attack_weight"] = num_clients_att / selected
else:
config_run["scale_attack_weight"] = scale
run = f"run-{session_num}"
run_dir = os.path.join(experiment_dir, run)
run_dir = os.path.join(run_dir, "events")
with tf.summary.create_file_writer(run_dir).as_default():
hp.hparams(hparams_dict) # record the values used in this trial
print(hparams_dict)
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
if not Model.model_supported(args.model_name, args.dataset):
raise Exception(
f'Model {args.model_name} does not support {args.dataset}! Check method Model.model_supported for the valid combinations.')
models = [load_model(args, config) for i in range(args.workers)]
server_model = FederatedAveraging(config, models, run)
server_model.init()
server_model.fit()
accuracy, adv_success, test_loss = server_model.evaluate()
# with tf.summary.create_file_writer(run_dir).as_default():
# tf.summary.scalar(METRIC_ACCURACY, accuracy, server_model.num_rounds)
# tf.summary.scalar(METRIC_ADV_SUCCESS, adv_success, server_model.num_rounds)
session_num += 1
metrics_dict = {
METRIC_ACCURACY: accuracy,
METRIC_ADV_SUCCESS: adv_success
}
server_model.write_hparams(hparams_dict, metrics_dict)
| 6,049 | 52.539823 | 159 |
py
|
fl-analysis
|
fl-analysis-master/src/config/definitions.py
|
from dataclasses import dataclass, MISSING, field
from typing import Optional, Dict, Any, List
from mashumaro.mixins.yaml import DataClassYAMLMixin
"""
This class defines the configuration schema of the framework.
"""
@dataclass
class Quantization(DataClassYAMLMixin):
"""
Apply quantization to the client updates, to simulate conversion to fixed-point representation in the crypto domain
(secure aggregation).
"""
type: str = MISSING # "probabilistic" or "deterministic"
"""Type of quantization to be used. Either `probabilistic` or `deterministic`."""
bits: int = MISSING
"""Number of total bits per parameter to quantize to."""
frac: int = MISSING
"""Number of bits to use for the fractional part of the number."""
@dataclass
class HyperparameterConfig(DataClassYAMLMixin):
"""
Config for hyperparameters being tuned in this run.
"""
args: Dict[str, Any]
"""Free-format dictionary of key-value pairs, where `key` must be string and `value` can be any type that tfevents
can handle."""
# in the future.. config of values to log and when.
@dataclass
class Environment(DataClassYAMLMixin):
"""Defines the environment of the experiment.
In addition, this config section has additional settings for logging different statistics by the framework."""
num_clients: int = MISSING
"""Number of clients"""
num_selected_clients: int = MISSING
"""Number of clients selected in each round"""
num_malicious_clients: int = MISSING
"""Number of malicious clients in total"""
experiment_name: str = MISSING
"""Name of the experiment (used to create the logging directory)"""
num_selected_malicious_clients: int = None
"""Number of malicious clients selected in each round"""
malicious_client_indices: Optional[List[int]] = None
"""Select a specific set of clients to be malicious, based on client index"""
attack_frequency: Optional[float] = None #
"""Frequency of malicious parties being selected. Default is None, for random selection"""
# these should be removed in the future
attacker_full_dataset: bool = False #
""" Whether the attacker has the full dataset."""
attacker_full_knowledge: bool = False
"""Whether the attacker has full knowledge of the other client updates in a given round."""
load_model: Optional[str] = None
"""Load a model to be used in training instead of a randomly initialized model.
Can be used to load pre-trained models."""
ignore_malicious_update: bool = False
"""Compute a malicious update, but ignore the update in aggregation.
Can be used to compute malicious client update statistics over time without compromising the model."""
print_every: int = 1
"""Defines after how many rounds should the framework evaluate the accuracy of the model."""
print_batch_text: bool = False
"""For character-prediction tasks, print batch of test set including prediction"""
save_updates: bool = False
"""Save all client updates in each round (Note: Quickly takes up a significant amount of storage space)."""
save_norms: bool = False
"""Log the norms of the client updates to a file."""
save_weight_distributions: bool = False
"""Save the weight update distributions to tfevents files."""
save_history: bool = False
"""Save the history of the global models on the server."""
save_model_at: List[int] = field(default_factory=lambda: [])
"""Framework saves the global model to a file at these rounds."""
save_weight_outside_bound: Optional[float] = None # to keep track of weights outside the l_inf bound
"""Keep track of how many weights are outside the given l_inf bound."""
print_backdoor_eval: bool = False
"""Whether to log the raw backdoor predictions of the model (at evaluation time)"""
seed: int = 0 # randomness seed
"""Randomness seed"""
use_config_dir: bool = False # whether to use the config parent dir as the target experiment dir
"""Whether to create a separate experiments output directory.
If `False` (default), the directory of the config YAML file is used as output directory."""
limit_tf_gpu_mem_mb: Optional[int] = None
"""Provide memory limit (MB) for tensorflow to allocate on the GPU, to leave space for other operations."""
@dataclass
class Dataset(DataClassYAMLMixin):
"""Defines the dataset to use."""
dataset: str = MISSING
"""Dataset type. Supported: `mnist`, `femnist`, `cifar10`"""
data_distribution: str = MISSING
"""Data distribution over the clients. specify `IID` for I.I.D. distribution, `NONIID` otherwise."""
number_of_samples: int = -1
"""Number of samples to use in the dataset. -1 means all samples"""
augment_data: bool = False
"""Whether to augment the data with random horizontal flips and horizontal/vertical shifts. Used for CIFAR-10."""
normalize_mnist_data: bool = False # Legacy flag, CIFAR is normalized
"""Legacy flag, whether to normalize the MNIST dataset"""
@dataclass
class FederatedDropout(DataClassYAMLMixin):
"""Defines federated dropout behavior."""
rate: float = 1.0
"""Dropout rate. Keep `rate` percentage of parameters"""
all_parameters: bool = True
"""If set to True, applies dropout on all parameters randomly according to the dropout rate."""
nonoverlap: bool = False
"""Each client receives a unique mask that is not overlapped with other clients' masks."""
randommask: bool = False
"""Enable low rank mode instead of federated dropout, i.e. only mask the uplink."""
@dataclass
class Server(DataClassYAMLMixin):
"""Defines server behavior."""
num_rounds: int = MISSING # Number of training rounds.
"""Number of training rounds"""
num_test_batches: int = MISSING
"""Number of batches to evaluate on in each evaluation"""
federated_dropout: Optional[FederatedDropout] = None
"""Federated dropout configuration"""
aggregator: Optional[Dict[str, Any]] = None
"""Aggregator to use. Default is FedAvg."""
global_learning_rate: Optional[float] = None
"""Global learning rate"""
intrinsic_dimension: int = 1000
"""For subspace learning, the size of the intrinsic dimension."""
gaussian_noise: float = 0.0
"""Amount (sigma) of centered around 0 Gaussian noise to add to the model update each round."""
@dataclass
class LearningDecay(DataClassYAMLMixin):
"""Defines a learning rate decay schedule"""
type: str = MISSING # exponential or boundaries
"""Type of learning rate decay. Supported: `exponential` for exponential decay and `boundaries` for
decay at arbitrary step boundaries."""
# exponential
decay_steps: Optional[int] = None
"""The number of steps after which the learning rate decays each time (requires `exponential` type)."""
decay_rate: Optional[float] = None
"""The rate at which the learning rate decays (requires `exponential` type)."""
# boundaries
decay_boundaries: Optional[List[int]] = None
"""The list of decay boundaries (requires `boundaries` type)."""
decay_values: Optional[List[float]] = None
"""The list of decay multiples corresponding to the areas defined by the boundaries (requires `boundaries` type)."""
# epochs
step_epochs: bool = False
"""Boundaries and steps are expressed as epoch"""
@dataclass
class TrainingConfig(DataClassYAMLMixin):
"""Client training configuration"""
num_epochs: int = MISSING
"""Number of training epochs"""
batch_size: int = MISSING
"""Client batch size"""
learning_rate: float = MISSING
"""Client learning rate"""
decay: Optional[LearningDecay] = None
"""Optional learning rate decay schedule"""
optimizer: str = "Adam" # Optimizer
"""Optimizer to use"""
regularization_rate: Optional[float] = None
"""Use L2 regularization to limit the size of the model `update` (not the model itself)."""
@dataclass
class NormBound(DataClassYAMLMixin):
"""Enforce a norm bound"""
type: str = MISSING # l2, linf, median_l2
"""Type of norm bound. Supported: `l2`, `linf`, `median_l2`"""
value: float = MISSING
"""Norm bound value"""
probability: Optional[float] = None # in case of linf, random clip
"""(`linf`) Legacy option to support clipping of randomly selected parameters"""
@dataclass
class MaliciousConfig(DataClassYAMLMixin):
"""Malicious training configuration. A malicious training configuration is largely defined by an attack objective
(targeted or untargeted), an evasion method (to evade a potential defense such as a norm bound) and a backdoor
that defines the specific set of samples to poison."""
objective: Dict[str, Any] = MISSING
"""Attack objective. Corresponds with attack classes. Supported: `TargetedAttack`, `UntargetedAttack`."""
evasion: Optional[Dict[str, Any]] = None
"""Evasion method. Supported: `NormBoundPGDEvasion` or `TrimmedMeanEvasion`"""
backdoor: Optional[Dict[str, Any]] = None
"""Backdoor type. Supports several kinds of backdoors, see the examples for more details. Support types: `tasks`
(select a set handwriters to poison), `edge_case` (select a set of external, edge-case samples),
`semantic` (define specific samples by id to poison)
"""
attack_type: Optional[str] = None
"""Legacy option, `targeted` or `untargeted`"""
estimate_other_updates: bool = False
"""Estimate the update that the clients will send based on the difference of the global model with
the previous round, based on an idea proposed by Bhagoji et al."""
attack_start: Optional[int] = 0
"""Round after which to start attacking"""
attack_stop: Optional[int] = 10000000
"""Round after which to stop attacking"""
# In a scaling attack with multiple attackers, whether attackers should divide
# a single malicious update amongst themselves.
multi_attacker_scale_divide: Optional[bool] = None
"""When multiple attackers are selected in a single round, one attacker computes an update that all
selected attackers then submit to the server. This can be useful to improve norm bound evasion.
The default behavior (when this flag is `false` or unspecified) is that all
attackers compute an update independently."""
@dataclass
class ClientConfig(DataClassYAMLMixin):
"""Defines the behavior of the clients."""
model_name: str = MISSING
"""What model to use. Note: Not all model / dataset combinations are supported. Supported models:
`dev`, `mnist_cnn`, `bhagoji`, `resnet18`, `resnet32`, `resnet44`, `resnet56`, `resnet110`, `resnet18_v2`,
`resnet56_v2`, `dev_intrinsic`, `dev_fc_intrinsic`, `bhagoji_intrinsic`, `mnistcnn_intrinsic`, `lenet5_mnist`,
`lenet5_cifar`, `lenet5_intrinsic`, `allcnn`, `allcnn_intrinsic`"""
benign_training: TrainingConfig = MISSING
"""Training config of the clients"""
quantization: Optional[Quantization] = None
"""Optional probabilistic quantization configuration"""
malicious: Optional[MaliciousConfig] = None
"""Optional configuration for the malicious clients"""
optimized_training: bool = True # whether to create the TF training loop
"""Whether to use optimized Tensorflow training. This should work by default, but when a large amount of clients
(> 5000) is used with limited GPU memory, the training process may run out of memory after training for some time."""
gaussian_noise: Optional[float] = None
"""Sigma of 0-centered gaussian noise to apply to the training samples"""
clip: Optional[NormBound] = None
"""Apply a norm bound to the client updates"""
model_weight_regularization: Optional[float] = None # weight reg for model (l2)
"""Weight regularization of the model (l2)"""
debug_client_training: bool = False
"""Debug client training process. Logs per-epoch training loss."""
disable_bn: bool = False
"""Disable batchnorm in resnet models"""
@dataclass
class Config(DataClassYAMLMixin):
"""Config root that defines the sections that can/must be specified."""
environment: Environment = MISSING
"""Details about the FL environment of the experiment"""
server: Server = MISSING
"""Server configuration."""
client: ClientConfig = MISSING
"""Client configuration."""
dataset: Dataset = MISSING
"""Dataset configuration."""
hyperparameters: Optional[HyperparameterConfig] = None
"""Hyperparameters that are used this run. This is a free-format section and can be used to log hyperparameters
in the tfevents format to later analyze using Tensorboard."""
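# A minimal YAML sketch matching this schema (field names come from the
# dataclasses above; all values are illustrative only):
#
#   environment:
#     num_clients: 10
#     num_selected_clients: 5
#     num_malicious_clients: 0
#     experiment_name: example_run
#   server:
#     num_rounds: 40
#     num_test_batches: 5
#   client:
#     model_name: lenet5_mnist
#     benign_training:
#       num_epochs: 3
#       batch_size: 128
#       learning_rate: 0.001
#   dataset:
#     dataset: mnist
#     data_distribution: IID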
| 12,695 | 44.342857 | 121 |
py
|
fl-analysis
|
fl-analysis-master/src/config/test_config.py
|
import unittest
from config import load_config
class ConfigTest(unittest.TestCase):
def test_load_config(self):
load_config("example_config.yaml")
if __name__ == '__main__':
unittest.main()
| 212 | 14.214286 | 42 |
py
|
fl-analysis
|
fl-analysis-master/src/config/config.py
|
from .definitions import Config
def load_config(config_name):
with open(config_name, "rb") as f:
config = Config.from_yaml(f.read())
return config
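# Usage sketch (the path is illustrative; see src/config/test_config.py):
#   cfg = load_config("example_config.yaml")
#   print(cfg.environment.num_clients)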
| 167 | 15.8 | 43 |
py
|
fl-analysis
|
fl-analysis-master/src/config/__init__.py
|
from .config import load_config
from .definitions import Environment
| 69 | 22.333333 | 36 |
py
|
fl-analysis
|
fl-analysis-master/src/aggregation/aggregators.py
|
from copy import deepcopy
import numpy as np
import logging
class Aggregator:
""" Aggregation behavior """
def aggregate(self, global_weights, client_weight_list):
"""
:type client_weight_list: list[list[np.ndarray]]
"""
raise NotImplementedError("Subclass")
class FedAvg(Aggregator):
def __init__(self, lr):
self.lr = lr
def aggregate(self, global_weights, client_weight_list):
"""Procedure for merging client weights together with `global_learning_rate`."""
# return deepcopy(client_weight_list[0]) # Take attacker's
current_weights = global_weights
new_weights = deepcopy(current_weights)
# return new_weights
update_coefficient = self.lr
for client in range(0, len(client_weight_list)):
for layer in range(len(client_weight_list[client])):
new_weights[layer] = new_weights[layer] + \
update_coefficient * (client_weight_list[client][layer] - current_weights[layer])
if np.isnan(new_weights[layer]).any(): # TODO: Remove
print(f"Layer {layer} is NaN!")
# import sys
# np.set_printoptions(threshold=sys.maxsize)
# print(new_weights[layer])
# print("XX")
# print(client_weight_list[client][layer])
# print("XX")
# print(current_weights[layer])
return new_weights
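# In effect FedAvg computes
#   w_new = w_global + lr * sum_over_clients(w_client - w_global),
# where `lr` is already divided by the number of clients in build_aggregator
# below, i.e. a learning-rate-scaled average of the client updates.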
class TrimmedMean(Aggregator):
def __init__(self, beta, lr):
"""
:param beta: fraction of values to truncate from each end
:type beta: float
"""
self.beta = beta
self.lr = lr
assert 0 < self.beta < 1/2, "Beta must be between zero and 1/2!"
def aggregate(self, global_weights, client_weight_list):
assert self.beta < 0.5, "Beta must be smaller than 0.5!"
truncate_count = int(self.beta * len(client_weight_list))
assert len(client_weight_list) - (truncate_count * 2) > 0, "Must be more clients for a given beta!"
current_weights = global_weights
new_weights = deepcopy(current_weights)
# sort by parameter
accumulator = [np.zeros([*layer.shape, len(client_weight_list)], layer.dtype) for layer in new_weights]
for client in range(0, len(client_weight_list)):
for layer in range(len(client_weight_list[client])):
accumulator[layer][..., client] = client_weight_list[client][layer] - current_weights[layer]
for layer in range(len(accumulator)):
accumulator[layer] = np.sort(accumulator[layer], -1)
if truncate_count > 0:
accumulator[layer] = accumulator[layer][..., truncate_count:-truncate_count]
else:
logging.warning(f"Beta is too low ({self.beta}), trimming no values which means we effectively take the mean.")
new_weights[layer] = new_weights[layer] + \
self.lr * np.mean(accumulator[layer], -1) * \
len(client_weight_list) # Multiply by list of clients
return new_weights
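# Worked sketch: with beta = 0.34 and three clients (as in trimmed_mean_test),
# truncate_count = int(0.34 * 3) = 1, so for every coordinate the smallest and
# largest client update are discarded and the remaining (median) value is
# scaled by lr * len(client_weight_list) before being added to the global weights.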
def build_aggregator(config):
aggregator = config.server.aggregator
lr = config.server.global_learning_rate
if lr < 0:
logging.info("Using default global learning rate of n/m")
lr = config.environment.num_clients / config.environment.num_selected_clients
else:
lr = lr
weight_coefficient = lr / config.environment.num_clients
from src import aggregation
cls = getattr(aggregation, aggregator["name"])
if "args" in aggregator:
return cls(lr=weight_coefficient, **aggregator["args"])
else:
return cls(lr=weight_coefficient)
# if aggregator.name == "FedAvg":
# return FedAvg(weight_coefficient)
# elif aggregator.name == "TrimmedMean":
# return TrimmedMean(config['trimmed_mean_beta'], weight_coefficient)
# else:
# raise NotImplementedError(f"Aggregator {aggregator} not supported!")
| 4,099 | 35.607143 | 127 |
py
|
fl-analysis
|
fl-analysis-master/src/aggregation/trimmed_mean_test.py
|
import unittest
import numpy as np
from src.aggregation.aggregators import TrimmedMean
class TrimmedMeanTest(unittest.TestCase):
def test_aggregates_properly(self):
w1 = np.array(((1, 5), (1, 5)))
w2 = np.array(((2, 3), (2, 3)))
w3 = np.array(((10, 11), (10, 11)))
average = TrimmedMean(0.34, 1.0).aggregate(np.zeros(w1.shape), [w1, w2, w3])
print(average)
if __name__ == '__main__':
unittest.main()
| 455 | 21.8 | 84 |
py
|
fl-analysis
|
fl-analysis-master/src/aggregation/__init__.py
|
from .aggregators import FedAvg, TrimmedMean, Aggregator
| 57 | 28 | 56 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/targeted_attack.py
|
from src.attack.attack import LossBasedAttack
import tensorflow as tf
import logging
import numpy as np
from src.data import image_augmentation
logger = logging.getLogger(__name__)
class TargetedAttack(LossBasedAttack):
def generate(self, dataset, model, **kwargs):
self.parse_params(**kwargs)
self.weights = model.get_weights()
loss_object_with_reg = self._combine_losses(
self.stealth_method.loss_term(model) if self.stealth_method is not None else None,
self.stealth_method.alpha if self.stealth_method is not None else None)
for epoch in range(self.num_epochs):
batch_counter = 0
for batch_x, batch_y in dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level): # 10 is ICML
# print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}")
with tf.GradientTape() as tape:
loss_value = loss_object_with_reg(y_true=batch_y, y_pred=model(batch_x, training=True))
# print(loss_value)
# print(batch_y)
# image_augmentation.debug(batch_x[0:1], batch_y[0:1])
grads = self._compute_gradients(tape, loss_value, model)
self.optimizer.apply_gradients(zip(grads, model.trainable_variables))
if self.step_decay is not None:
self.step_decay.apply_step()
if self.stealth_method is not None:
self.stealth_method.update_after_batch(model)
batch_counter += 1
# test_success, adv_success = self.eval_aux_test(dataset, model, self.loss_object)
# print(test_success, adv_success)
logger.info(f"Epoch {epoch}: {batch_counter}")
if self.stealth_method is not None:
self.stealth_method.update_after_training(model)
return model.get_weights()
def parse_params(self, num_epochs, num_batch, poison_samples, optimizer, loss_object, step_decay=None,
noise_level=None):
self.num_epochs = num_epochs
self.num_batch = num_batch
self.poison_samples = poison_samples
self.optimizer = optimizer
self.loss_object = loss_object
self.step_decay = step_decay
self.noise_level = noise_level
def eval_aux_test(self, dataset, model, loss_object):
def calc_acc(ds):
counter = 10
adv_ss = []
for batch_x, batch_y in ds: # aux samples
preds = model(batch_x, training=False)
loss_value = loss_object(y_true=batch_y, y_pred=preds)
pred_inds = preds.numpy().argmax(axis=1) == batch_y
# print(pred_inds, batch_y)
adv_success = np.mean(pred_inds)
adv_ss.append(adv_success)
counter -= 1
if counter == 0:
break
return np.mean(adv_ss)
return calc_acc(dataset.get_data()), calc_acc(dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level))
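# Hedged usage sketch (optimizer, loss object and counts are illustrative; the
# dataset is expected to provide get_data_with_aux as used in generate above):
#   attack = TargetedAttack()
#   attack.set_stealth_method(None)
#   poisoned_weights = attack.generate(
#       dataset, model,
#       num_epochs=1, num_batch=10, poison_samples=5,
#       optimizer=tf.keras.optimizers.Adam(1e-3),
#       loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))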
| 3,152 | 35.241379 | 135 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/attack.py
|
from src.data.tf_data import Dataset
from src.attack.evasion.evasion_method import EvasionMethod
class Attack(object):
def __init__(self):
pass
def generate(self, dataset, model, **kwargs):
raise NotImplementedError("Sub-classes must implement generate.")
def _compute_gradients(self, tape, loss_value, model):
grads = tape.gradient(loss_value, model.trainable_variables)
return grads
class StealthAttack(Attack):
def __init__(self):
super().__init__()
self.stealth_method = None
def set_stealth_method(self, stealth_method: EvasionMethod):
"""
:type stealth_method: EvasionMethod|None
"""
self.stealth_method = stealth_method
class LossBasedAttack(StealthAttack):
def _combine_losses(self, reg, alpha):
"""
Combine loss with regularization loss.
:param reg: callable regularization loss callback function
:param alpha: float|None
:return:
"""
if reg is None or alpha is None:
def direct_loss(y_true, y_pred):
return self.loss_object(y_true=y_true, y_pred=y_pred)
return direct_loss
def loss(y_true, y_pred):
return alpha * self.loss_object(y_true=y_true, y_pred=y_pred) + \
((1 - alpha) * reg(y_true=y_true, y_pred=y_pred))
return loss
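# In effect the combined objective is
#   alpha * task_loss(y_true, y_pred) + (1 - alpha) * stealth_regularizer(y_true, y_pred),
# falling back to the plain task loss when no stealth method or alpha is given.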
class AttackDataset(object):
def get_data(self):
raise NotImplementedError()
class AttackDatasetBridge(AttackDataset):
def __init__(self, global_dataset: Dataset):
self.global_dataset = global_dataset
def get_data_with_aux(self, poison_samples, num_batch, noise_level):
return self.global_dataset.get_data_with_aux(poison_samples, num_batch, noise_level)
def get_data(self):
return self.global_dataset.get_data()
| 1,884 | 26.318841 | 92 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/framework_attack_wrapper.py
|
class FrameworkAttackWrapper(object):
"""Wraps an attack with dict params to invocate later."""
def __init__(self, attack, kwargs):
self.attack = attack
self.kwargs = kwargs
| 200 | 24.125 | 61 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/anticipate_tf_attack.py
|
from src.attack.attack import LossBasedAttack
import logging
import numpy as np
import tensorflow as tf
from copy import copy
logger = logging.getLogger(__name__)
# Move this into generate later
# from src.torch_compat.anticipate import train_anticipate
class AnticipateTfAttack(LossBasedAttack):
def generate(self, dataset, model, **kwargs):
self.parse_params(**kwargs)
self.weights = model.get_weights()
loss_object_with_reg = self._combine_losses(
self.stealth_method.loss_term(model) if self.stealth_method is not None else None,
self.stealth_method.alpha if self.stealth_method is not None else None)
attack_model = model
current_model = copy(model)
current_model.set_weights(attack_model.get_weights())
        fl_no_models = self.fl_no_models  # number of clients assumed in the simulated FedAvg aggregation
# from datetime import datetime
# stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
# logdir = 'logs/func/%s' % stamp # <- Name of this `run`
# writer = tf.summary.create_file_writer(logdir)
# tf.summary.trace_on(graph=True, profiler=False)
for epoch in range(self.num_epochs):
batch_counter = 0
for batch_x, batch_y in dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level):
# print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}")
loss = None
with tf.GradientTape(persistent=True) as tape:
for anticipate_i in range(self.anticipate_steps):
if anticipate_i == 0:
current_model = self.honest_training(tape, dataset, current_model)
for att_weight, cur_weight in zip(attack_model.trainable_variables, current_model.trainable_variables):
after_avg = (att_weight + (cur_weight * (fl_no_models - 1))) / fl_no_models
cur_weight.assign(after_avg)
else:
current_model = self.honest_training(tape, dataset, current_model)
if self.optimization_method == 'A':
if anticipate_i == self.anticipate_steps - 1:
loss_value = loss_object_with_reg(y_true=batch_y,
y_pred=current_model(batch_x, training=True))
loss = loss_value
else:
loss_value = loss_object_with_reg(y_true=batch_y,
y_pred=current_model(batch_x, training=True))
if loss is None:
loss = loss_value
else:
loss = loss + loss_value
# print(loss_value)
# print(batch_y)
# image_augmentation.debug(batch_x[0:1], batch_y[0:1])
                grads = self._compute_gradients(tape, loss, model)  # differentiate the accumulated anticipation loss
self.optimizer.apply_gradients(zip(grads, attack_model.trainable_variables))
# if self.step_decay is not None:
# self.step_decay.apply_step()
#
# if self.stealth_method is not None:
# self.stealth_method.update_after_batch(model)
batch_counter += 1
# test_success, adv_success = self.eval_aux_test(dataset, model, self.loss_object)
# print(test_success, adv_success)
logger.info(f"Epoch {epoch}: {batch_counter}")
if self.stealth_method is not None:
self.stealth_method.update_after_training(attack_model)
# with writer.as_default():
# tf.summary.trace_export("attack_graph", step=1)
return attack_model.get_weights()
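    # Note on generate(): the first anticipation step simulates honest training followed by a FedAvg-style
    # aggregation, (att_weight + (fl_no_models - 1) * cur_weight) / fl_no_models, and later steps simulate
    # further honest training; the adversarial loss is then evaluated on these anticipated future global
    # models, and the resulting gradients are applied to the attacker's own weights (attack_model).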
def honest_training(self, tape, dataset, model):
honest_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
loss_object_with_reg = self._combine_losses(
self.stealth_method.loss_term(model) if self.stealth_method is not None else None,
self.stealth_method.alpha if self.stealth_method is not None else None)
for epoch in range(self.num_epochs):
for batch_x, batch_y in dataset.get_data():
# print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}")
predictions = model(batch_x, training=True)
total_loss = loss_object_with_reg(y_true=batch_y, y_pred=predictions)
grads = tape.gradient(total_loss, model.trainable_variables)
honest_optimizer.apply_gradients(zip(grads, model.trainable_weights))
# logger.info(f"Epoch {epoch}: {batch_counter}")
# batch_counter = batch_counter + 1
break
return model
# def local_train_honest(self, tape, dataset, model, num_clients=1):
# # TODO: Local fl training steps?
# for (batch_x, batch_y) in dataset.get_data():
# predictions = model(batch_x, training=True)
# loss_value = self.loss_object(y_true=batch_y, y_pred=predictions)
#
# # reg = tf.reduce_sum(model.losses)
# # total_loss = loss_value + reg
#
# grads = tape.gradient(loss_value, model.trainable_variables)
# # honest optimizer?
# self.optimizer.apply_gradients(zip(grads, model.trainable_weights))
def parse_params(self, num_epochs, num_batch, poison_samples, optimizer, loss_object, step_decay=None,
noise_level=None, anticipate_steps=7, model_type="lenet5_mnist", optimization_method=None, fl_no_models=10, regular_train=False):
self.num_epochs = num_epochs
self.num_batch = num_batch
self.poison_samples = poison_samples
self.optimizer = optimizer
self.loss_object = loss_object
self.step_decay = step_decay
self.noise_level = noise_level
self.anticipate_steps = anticipate_steps
self.model_type = model_type
self.optimization_method = optimization_method
self.fl_no_models = fl_no_models
self.regular_train = regular_train
def eval_aux_test(self, dataset, model, loss_object):
def calc_acc(ds):
counter = 10
adv_ss = []
for batch_x, batch_y in ds: # aux samples
preds = model(batch_x, training=False)
loss_value = loss_object(y_true=batch_y, y_pred=preds)
pred_inds = preds.numpy().argmax(axis=1) == batch_y
# print(pred_inds, batch_y)
adv_success = np.mean(pred_inds)
adv_ss.append(adv_success)
counter -= 1
if counter == 0:
break
return np.mean(adv_ss)
return calc_acc(dataset.get_data()), calc_acc(dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level))
| 7,110 | 40.104046 | 150 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/parse_config.py
|
def map_objective(name):
"""
:param name: str
:param evasion: EvasionMethod to be added
:return:
"""
from src import attack
cls = getattr(attack, name)
return cls()
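# Illustrative usage (assumed): map_objective("TargetedAttack") looks the class up by name in the
# src.attack package and returns a fresh instance, e.g. an attack.TargetedAttack object.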
# def load_attacks(attack_file_name):
# with open(attack_file_name) as stream:
# yaml = YAML(typ='safe')
# attacks = yaml.load(stream)
#
# # Many-to-many cartesian product
# objectives = attacks['objectives']
# evasions = attacks['evasion']
# backdoors = attacks['backdoors']
#
# return attacks
# class AttackConfig():
| 562 | 19.107143 | 45 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/__init__.py
|
from .targeted_attack import TargetedAttack
from .untargeted_attack import UntargetedAttack
from .anticipate_tf_attack import AnticipateTfAttack
| 145 | 35.5 | 52 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/untargeted_attack.py
|
from src.attack.attack import LossBasedAttack
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
class UntargetedAttack(LossBasedAttack):
def generate(self, dataset, model, **kwargs):
self.parse_params(**kwargs)
self.weights = model.get_weights()
loss_object_with_reg = self._combine_losses(
self.stealth_method.loss_term(model) if self.stealth_method is not None else None,
            self.stealth_method.alpha if self.stealth_method is not None else None)
for epoch in range(self.num_epochs):
logger.info(f"Epoch {epoch}")
for batch_x, batch_y in dataset.get_data():
with tf.GradientTape() as tape:
loss_value = loss_object_with_reg(y_true=batch_y, y_pred=model(batch_x, training=True))
grads = self._compute_gradients(tape, loss_value, model)
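                # Negating every gradient component below turns the update into gradient *ascent* on the
                # task loss, i.e. the untargeted attack actively degrades the global model's accuracy.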
for k in range(len(grads)):
grads[k] = -grads[k]
self.optimizer.apply_gradients(zip(grads, model.trainable_variables))
if self.step_decay is not None:
self.step_decay.apply_step()
if self.stealth_method is not None:
self.stealth_method.update_after_batch(model)
if self.stealth_method is not None:
self.stealth_method.update_after_training(model)
return model.get_weights()
def parse_params(self, num_epochs, optimizer, loss_object, step_decay=None):
self.num_epochs = num_epochs
self.optimizer = optimizer
self.loss_object = loss_object
self.step_decay = step_decay
| 1,636 | 33.829787 | 107 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/test/AttackTest.py
|
import tensorflow as tf
import numpy as np
from src.data.tf_data_global import IIDGlobalDataset
from src.attack.evasion.norm import NormBoundPGDEvasion
from src.attack.evasion.trimmed_mean import TrimmedMeanEvasion
from src.attack.attack import AttackDatasetBridge
from src.attack.untargeted_attack import UntargetedAttack
from src.attack.targeted_attack import TargetedAttack
from src.data.tf_data import ImageGeneratorDataset, Dataset
class AttackTest(tf.test.TestCase):
def setUp(self):
super(AttackTest, self).setUp()
self.model = tf.keras.models.load_model("./../../../models/lenet5_emnist_098.h5")
(x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(-1, 1)
(x_train, y_train), (x_test, y_test) = (x_train[0], y_train[0]), (x_test[0], y_test[0])
(x_train, y_train) = (x_train[:15000], y_train[:15000])
targets = [1, 2, 3, 4, 5, 6, 7, 8]
x_mal, y_mal_orig = x_train[targets], y_train[targets]
y_mal = np.repeat(3, len(targets)).astype(y_train.dtype)
        x_train = np.delete(x_train, targets, axis=0)
        y_train = np.delete(y_train, targets, axis=0)
self.global_dataset = IIDGlobalDataset(x_train, y_train, 30, x_test, y_test)
self.dataset = AttackDatasetBridge(Dataset(x_train, y_train))
self.dataset.global_dataset.x_aux = x_mal
self.dataset.global_dataset.y_aux = y_mal_orig
self.dataset.global_dataset.mal_aux_labels = y_mal
self.test_accuracy = tf.keras.metrics.Mean(name='test_accuracy')
def _evaluate_targeted(self):
batch_x, batch_y = self.dataset.global_dataset.x_aux, self.dataset.global_dataset.mal_aux_labels
preds = self.model(batch_x, training=False).numpy().argmax(axis=1)
pred_inds = preds == batch_y
adv_success = np.mean(pred_inds)
print(f"Adv success: {adv_success}")
def _evaluate_untargeted(self):
for batch_x, batch_y in self.global_dataset.get_test_batch(64, 12):
self.optimized_evaluate(batch_x, batch_y)
test_accuracy = self.test_accuracy.result().numpy()
print(f"Adv success: {1 - test_accuracy}")
@tf.function
def optimized_evaluate(self, batch_x, batch_y):
prediction_tensor = self.model(batch_x, training=False)
prediction = prediction_tensor
y_ = tf.cast(tf.argmax(prediction, axis=1), tf.uint8)
test_accuracy_batch = tf.equal(y_, batch_y)
self.test_accuracy(tf.reduce_mean(tf.cast(test_accuracy_batch, tf.float32)))
def tearDown(self):
pass
def test_untargeted_attack(self):
self._evaluate_untargeted()
att = UntargetedAttack()
att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.1, 1, pgd_factor=.1))
weights = att.generate(self.dataset, self.model,
num_epochs=1,
optimizer=tf.keras.optimizers.Adam(),
loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
self.model.set_weights(weights)
self._evaluate_untargeted()
def test_untargeted_attack_tootight(self):
self._evaluate_untargeted()
att = UntargetedAttack()
att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.00001, 1, pgd_factor=0.00001))
weights = att.generate(self.dataset, self.model,
num_epochs=1,
optimizer=tf.keras.optimizers.Adam(),
loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
alpha=0.1)
self.model.set_weights(weights)
self._evaluate_untargeted()
def test_untargeted_attack_trimmedmean(self):
self._evaluate_untargeted()
att = UntargetedAttack()
        att.set_stealth_method(TrimmedMeanEvasion([self.model.get_weights(), self.model.get_weights(), self.model.get_weights()], 0.5, 1))
weights = att.generate(self.dataset, self.model,
num_epochs=1,
optimizer=tf.keras.optimizers.Adam(),
loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
self.model.set_weights(weights)
self._evaluate_untargeted()
def test_targeted_attack_norm(self):
self._evaluate_untargeted()
att = TargetedAttack()
att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.1, 1, pgd_factor=.1))
weights = att.generate(self.dataset, self.model,
num_epochs=3,
num_batch=6,
poison_samples=5,
optimizer=tf.keras.optimizers.Adam(),
loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
self.model.set_weights(weights)
self._evaluate_targeted()
def test_targeted_attack_norm_l2(self):
self._evaluate_untargeted()
l2 = 1.0
att = TargetedAttack()
att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "l2", 2, l2))
old_weights = self.model.get_weights()
new_weights = att.generate(self.dataset, self.model,
num_epochs=3,
num_batch=6,
poison_samples=5,
optimizer=tf.keras.optimizers.Adam(),
loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
delta_weights = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
l2_norm_tensor = tf.constant(l2)
layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights))] # for norm calculation
norm = tf.norm(tf.concat(layers_to_clip, axis=0))
# print(f"Norm: {norm}")
multiply = min((l2_norm_tensor / norm).numpy(), 1.0)
new_weights_clipped = [delta_weights[i] * multiply for i in
range(len(delta_weights))]
self.model.set_weights(new_weights)
self._evaluate_targeted()
if __name__ == '__main__':
tf.test.main()
| 6,328 | 45.19708 | 138 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/test/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/evasion_method.py
|
import tensorflow as tf
class EvasionMethod(object):
def __init__(self, alpha):
"""
:type alpha: float|None alpha weight of evasion method. The closer to 1 the more we want to evade.
"""
self.alpha = alpha
def loss_term(self, model):
return None
def update_after_batch(self, model):
return
def update_after_training(self, model):
return
| 418 | 17.217391 | 106 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/trimmed_mean.py
|
from .evasion_method import EvasionMethod
import numpy as np
import tensorflow as tf
class TrimmedMeanEvasion(EvasionMethod):
def __init__(self, benign_updates_this_round, alpha, n_remove_malicious):
"""
:type benign_updates_this_round: [[np.ndarray]] list of client updates,
:type alpha: float alpha of regularization
:type n_remove_malicious int number of updates to trim on each side
one update contains a list of per-layer weights.
"""
super().__init__(alpha)
assert len(benign_updates_this_round) > 2 * n_remove_malicious, "Must have more entries than malicious items being removed."
# Organize updates per client into one single numpy matrix
self.benign_clients = [np.zeros([*layer.shape, len(benign_updates_this_round)], layer.dtype)
for layer in benign_updates_this_round[0]]
for client in range(0, len(benign_updates_this_round)):
for layer in range(len(benign_updates_this_round[client])):
self.benign_clients[layer][..., client] = benign_updates_this_round[client][layer]
for layer in range(len(self.benign_clients)):
sorted = np.sort(self.benign_clients[layer], -1)
# Remove bottom and top values
self.benign_clients[layer] = sorted[..., n_remove_malicious:-n_remove_malicious]
# For now, take smallest of top and bottom values ... may backfire in future
self.layer_val = [np.minimum(np.abs(layer[..., 0]), np.abs(layer[..., -1])) for layer in self.benign_clients]
self.layer_val_tensor = [tf.convert_to_tensor(layer) for layer in self.layer_val]
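        # layer_val keeps, per parameter, the smaller magnitude of the two extreme values that survive
        # trimming; the loss_term below pulls the malicious weights towards these values so the crafted
        # update stays close to what a trimmed-mean aggregator would still accept.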
def loss_term(self, model):
def loss(y_true, y_pred):
weight_norm = tf.constant(0.0, dtype=tf.float32)
layer_i = 0
local_weights = model.layers
for local_weight_layer in local_weights:
w = local_weight_layer.weights
if len(w) > 1:
global_layer_tensor_w = self.layer_val_tensor[layer_i]
# global_layer_tensor_b = tf.convert_to_tensor(global_weights[layer_i + 1])
delta_weight = w[0] - global_layer_tensor_w
# weight_norm += tf.nn.l2_loss(delta_weight)
weight_norm = tf.add(weight_norm, tf.nn.l2_loss(delta_weight))
layer_i += len(w)
return weight_norm
return loss
def update_after_training(self, model):
# Now we explicitly clip, not sure if this is needed as if we do not get selected the "second best" would be
pass
| 2,652 | 43.966102 | 132 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/norm.py
|
from .evasion_method import EvasionMethod
import logging
import numpy as np
import tensorflow as tf
class NormBoundPGDEvasion(EvasionMethod):
"""
Evades norm bound using PGD.
"""
def __init__(self, old_weights, norm_type, scale_factor, clipping_bound=None, pgd_factor=None,
benign_updates=None):
"""
:param old_weights: current weights
:param norm_type: l2 or linf
:param clipping_bound: clipping bound value
:param scale_factor: factor with which to scale the update
:param pgd_factor: factor with which to apply pgd
"""
        super().__init__(alpha=None)  # Alpha is None because we evade via PGD projection rather than a loss term
self.weights = old_weights
self.norm_type = norm_type
self.clipping_bound = clipping_bound
self.scale_factor = scale_factor
self.pgd_factor = pgd_factor
self.benign_updates = benign_updates
self.benign_median = self.compute_median(self.benign_updates) if self.benign_updates is not None and len(self.benign_updates) > 0 else None
def update_after_batch(self, model):
if self.pgd_factor is not None:
# clip pgd
new_weights = model.get_weights()
new_weights = self.apply_pgd_weights(self.weights, new_weights)
model.set_weights(new_weights)
def update_after_training(self, model):
# scale!
new_weights = model.get_weights()
delta_weights = [(new_weights[i] - self.weights[i]) * self.scale_factor
for i in range(len(self.weights))]
update = [self.weights[i] + delta_weights[i] for i in range(len(self.weights))]
model.set_weights(update)
def apply_defense(self, old_weights, new_weights, clip=None, clip_l2=None):
"""
Applies defenses based on configuration
:param clip:
:param old_weights:
:param new_weights:
:return: new weights
"""
assert old_weights is not None, "Old weights can't be none"
assert new_weights is not None, "New weights can't be none"
delta_weights = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
# clip_layers = self.config['clip_layers'] if self.config['clip_layers'] != [] else range(len(old_weights))
clip_layers = range(len(old_weights))
if clip is not None:
delta_weights = [np.clip(delta_weights[i], -clip, clip) if i in clip_layers else delta_weights[i]
for i in range(len(delta_weights))]
if clip_l2 and clip_l2 > 0:
delta_weights = self.clip_l2(delta_weights, clip_l2, clip_layers)
new_weights = [old_weights[i] + delta_weights[i] for i in range(len(old_weights))]
return new_weights
def apply_pgd_weights(self, old_weights, new_weights):
pgd = self.norm_type
if self.pgd_factor is not None:
pgd_constraint = self.pgd_factor
self.debug(f"Applying constraint {pgd} with value {pgd_constraint}")
if pgd == 'linf':
new_weights = self.apply_defense(old_weights, new_weights, pgd_constraint, None)
elif pgd == 'l2':
new_weights = self.apply_defense(old_weights, new_weights, None, pgd_constraint)
elif pgd == 'median_l2':
                assert self.benign_updates is not None, "We must have knowledge of the benign updates in order to " \
"compute the required bound (median)!"
# compute median
median_pgd = self.benign_median * self.pgd_factor / self.scale_factor
new_weights = self.apply_defense(old_weights, new_weights, None, median_pgd)
else:
raise Exception('PGD type not supported')
return new_weights
def clip_l2(self, delta_weights, l2, clip_layers):
"""
Calculates the norm per layer.
:param delta_weights: current weight update
:param l2: l2 bound
:param clip_layers: what layers to apply clipping to
:return:
"""
l2_norm_tensor = tf.constant(l2, dtype=tf.float32)
layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights)) if
i in clip_layers] # for norm calculation
norm = tf.norm(tf.concat(layers_to_clip, axis=0))
multiply = min((l2_norm_tensor / norm).numpy(), 1.0)
return [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in
range(len(delta_weights))]
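        # Equivalent formula: the flattened update is projected onto the L2 ball of radius l2,
        #   delta <- delta * min(1, l2 / ||delta||_2)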
def compute_median(self, client_updates):
"""
Compute the client median values based on client updates.
@type client_updates: list[list[np.array]] client layer weights
"""
l2_norms_per_client = [tf.norm(tf.concat([tf.reshape(delta_weights[i], [-1]) \
for i in range(len(delta_weights))], axis=0)) \
for delta_weights in client_updates] # for norm calculation
median_real = np.median(l2_norms_per_client)
        # Given this median, we can be smart and calculate whether, with us included, the count is even or odd, and depending on
# that, select the value 1 above the median (influenced by us being added to the list), or average between the two.
# Assumes attacker will _always_ give an update that is larger.
# TODO: This should also depend on the number of malicious attackers.
l2_norms_per_client.append(np.max(l2_norms_per_client)) # Pretend to add self as max
median = np.median(l2_norms_per_client)
return median
def debug(self, v):
logging.debug(f"Norm bound stealth: {v}")
| 5,826 | 42.162963 | 147 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/norm_prob_check.py
|
from . import NormBoundPGDEvasion
from .evasion_method import EvasionMethod
import logging
import numpy as np
import tensorflow as tf
class NormBoundProbabilisticCheckingEvasion(NormBoundPGDEvasion):
"""
Adaptive attack for probabilistic checking
"""
def __init__(self, old_weights, norm_type, scale_factor, keep_number_of_weights, clipping_bound=None, pgd_factor=None, benign_updates=None):
self.keep_number_of_weights = keep_number_of_weights # K, top-K
super().__init__(old_weights, norm_type, scale_factor, clipping_bound, pgd_factor, benign_updates)
assert self.clipping_bound is not None, "Must provide an explicit clipping bound to clip other values to!"
assert self.norm_type == "linf", "Norm type must be linf in order to use probabilistic checking!"
def update_after_training(self, model):
# scale top-K!
new_weights = model.get_weights()
delta_weights = [(new_weights[i] - self.weights[i])
for i in range(len(self.weights))]
delta_weights = self.flatten_update(delta_weights)
# select top-K
indices_to_scale = np.argpartition(np.abs(delta_weights), -self.keep_number_of_weights)[-self.keep_number_of_weights:]
indices_to_not_scale = list(set(range(len(delta_weights))) - set(indices_to_scale.tolist()))
for i in indices_to_scale:
delta_weights[i] = delta_weights[i] * self.scale_factor
for i in indices_to_not_scale:
delta_weights[i] = np.clip(delta_weights[i], -self.clipping_bound, self.clipping_bound)
delta_weights = self.unflatten(delta_weights, self.weights)
update = [self.weights[i] + delta_weights[i] for i in range(len(self.weights))]
model.set_weights(update)
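        # Summary of the step above: only the top-K largest-magnitude coordinates of the update get boosted
        # by scale_factor, while all remaining coordinates are clipped to the linf clipping bound, so a
        # defender that probabilistically inspects random coordinates will mostly observe in-bound values.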
def unflatten(self, w, weights):
sizes = [x.size for x in weights]
split_idx = np.cumsum(sizes)
update_ravelled = np.split(w, split_idx)[:-1]
shapes = [x.shape for x in weights]
update_list = [np.reshape(u, s) for s, u in zip(shapes, update_ravelled)]
return update_list
def flatten_update(self, update):
return np.concatenate([x.ravel() for x in update])
def apply_defense(self, old_weights, new_weights, clip=None, clip_l2=None):
"""
Applies defenses based on configuration
:param clip:
:param old_weights:
:param new_weights:
:return: new weights
"""
assert old_weights is not None, "Old weights can't be none"
assert new_weights is not None, "New weights can't be none"
delta_weights = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
# clip_layers = self.config['clip_layers'] if self.config['clip_layers'] != [] else range(len(old_weights))
clip_layers = range(len(old_weights))
if clip is not None:
delta_weights = [np.clip(delta_weights[i], -clip, clip) if i in clip_layers else delta_weights[i]
for i in range(len(delta_weights))]
if clip_l2 and clip_l2 > 0:
delta_weights = self.clip_l2(delta_weights, clip_l2, clip_layers)
new_weights = [old_weights[i] + delta_weights[i] for i in range(len(old_weights))]
return new_weights
def apply_pgd_weights(self, old_weights, new_weights):
pgd = self.norm_type
if self.pgd_factor is not None:
pgd_constraint = self.pgd_factor
self.debug(f"Applying constraint {pgd} with value {pgd_constraint}")
if pgd == 'linf':
new_weights = self.apply_defense(old_weights, new_weights, pgd_constraint, None)
elif pgd == 'l2':
new_weights = self.apply_defense(old_weights, new_weights, None, pgd_constraint)
elif pgd == 'median_l2':
                assert self.benign_updates is not None, "We must have knowledge of the benign updates in order to " \
"compute the required bound (median)!"
# compute median
median_pgd = self.benign_median * self.pgd_factor / self.scale_factor
new_weights = self.apply_defense(old_weights, new_weights, None, median_pgd)
else:
raise Exception('PGD type not supported')
return new_weights
def clip_l2(self, delta_weights, l2, clip_layers):
"""
Calculates the norm per layer.
:param delta_weights: current weight update
:param l2: l2 bound
:param clip_layers: what layers to apply clipping to
:return:
"""
l2_norm_tensor = tf.constant(l2, dtype=tf.float32)
layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights)) if
i in clip_layers] # for norm calculation
norm = tf.norm(tf.concat(layers_to_clip, axis=0))
multiply = min((l2_norm_tensor / norm).numpy(), 1.0)
return [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in
range(len(delta_weights))]
def compute_median(self, client_updates):
"""
Compute the client median values based on client updates.
@type client_updates: list[list[np.array]] client layer weights
"""
l2_norms_per_client = [tf.norm(tf.concat([tf.reshape(delta_weights[i], [-1]) \
for i in range(len(delta_weights))], axis=0)) \
for delta_weights in client_updates] # for norm calculation
median_real = np.median(l2_norms_per_client)
        # Given this median, we can be smart and calculate whether, with us included, the count is even or odd, and depending on
# that, select the value 1 above the median (influenced by us being added to the list), or average between the two.
# Assumes attacker will _always_ give an update that is larger.
# TODO: This should also depend on the number of malicious attackers.
l2_norms_per_client.append(np.max(l2_norms_per_client)) # Pretend to add self as max
median = np.median(l2_norms_per_client)
return median
def debug(self, v):
logging.debug(f"Norm bound stealth: {v}")
| 6,332 | 43.598592 | 144 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/__init__.py
|
from .norm import NormBoundPGDEvasion
from .trimmed_mean import TrimmedMeanEvasion
from .norm_prob_check import NormBoundProbabilisticCheckingEvasion
from .neurotoxin import NeurotoxinEvasion
def construct_evasion(classname, **kwargs):
"""Constructs evasion method"""
import src.attack.evasion as ev
cls = getattr(ev, classname)
return cls(**kwargs)
| 368 | 29.75 | 66 |
py
|
fl-analysis
|
fl-analysis-master/src/attack/evasion/neurotoxin.py
|
from . import NormBoundPGDEvasion
from .evasion_method import EvasionMethod
import logging
import numpy as np
import tensorflow as tf
class NeurotoxinEvasion(NormBoundPGDEvasion):
"""
    Neurotoxin-style evasion: constrains the malicious update to coordinates that the global model
    has recently changed the least.
"""
def __init__(self, old_weights, norm_type, scale_factor, topk, last_round_weights, clipping_bound=None, benign_updates=None):
        self.topk = topk  # percentage of coordinates (top-k by magnitude of the last global update) to avoid
        self.last_round_weights = last_round_weights  # previous-round global weights, used to estimate S
self.indices_to_reset = None
self.pgd_factor = np.nan # so apply_pgd_weights gets called
super().__init__(old_weights, norm_type, scale_factor, clipping_bound, np.nan, benign_updates)
assert self.last_round_weights is not None, "Last round's weights cannot be None, we need them to estimate S!"
self.indices_to_reset = self.compute_smallest_set_indices(old_weights, last_round_weights, topk)
def compute_smallest_set_indices(self, old_weights, last_round_weights, topk):
"""
Set self.indices_to_reset
@return:
"""
delta_weights = [old_weights[i] - last_round_weights[i]
for i in range(len(old_weights))]
delta_weights = self.flatten_update(delta_weights)
# find topk
keep_number_of_weights = int(len(delta_weights) * (topk / 100.0))
indices_to_reset = np.argpartition(np.abs(delta_weights), -keep_number_of_weights)[-keep_number_of_weights:]
return indices_to_reset
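    # indices_to_reset thus holds the top-k% coordinates by magnitude of the latest global update;
    # apply_pgd_weights below zeroes the malicious delta on exactly these coordinates, confining the
    # attack to parameters that benign training moves the least.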
def apply_pgd_weights(self, old_weights, new_weights):
"""
Does not apply PGD but projects onto S
@param old_weights:
@param new_weights:
@return:
"""
delta_weights = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
delta_weights = self.flatten_update(delta_weights)
delta_weights[self.indices_to_reset] = 0.0
delta_weights = self.unflatten(delta_weights, self.weights)
new_weights = [old_weights[i] + delta_weights[i] for i in range(len(old_weights))]
return new_weights
def unflatten(self, w, weights):
sizes = [x.size for x in weights]
split_idx = np.cumsum(sizes)
update_ravelled = np.split(w, split_idx)[:-1]
shapes = [x.shape for x in weights]
update_list = [np.reshape(u, s) for s, u in zip(shapes, update_ravelled)]
return update_list
def flatten_update(self, update):
return np.concatenate([x.ravel() for x in update])
| 2,523 | 39.063492 | 129 |
py
|
fl-analysis
|
fl-analysis-master/src/test/DataLoaderTest.py
|
import tensorflow as tf
import numpy as np
from src.client_attacks import Attack
from src.data import data_loader
from src.data.tf_data_global import NonIIDGlobalDataset
class DataLoaderTest(tf.test.TestCase):
def setUp(self):
super(DataLoaderTest, self).setUp()
def tearDown(self):
pass
def test_get_datasets_count(self):
datasets = {'cifar10': 50000,
'mnist': 60000,
'femnist': 341870}
for name, count in datasets.items():
dataset = self.load_dataset(name, 'IID')
self.assertEqual(np.sum([len(x) for x in dataset.y_train]), count)
def test_get_emnist_noniid_dataset(self):
dataset = self.load_dataset('femnist', 'nonIID')
self.assertEqual(np.sum([len(x) for x in dataset.y_train]), 341873)
def test_get_attack_cifar(self):
attack_config = {
'backdoor_feature_aux_train': [568, 3934, 12336, 30560, 33105, 33615, 33907, 36848, 41706],
            # Racing cars with stripes in the background
'backdoor_feature_aux_test': [330, 30696, 40713],
'backdoor_feature_target': 2,
'backdoor_feature_remove_malicious': False,
'backdoor_feature_augment_times': 200,
'backdoor_feature_benign_regular': []
}
dataset = self.load_dataset('cifar10', 'nonIID', attack_type=Attack.BACKDOOR, other=attack_config)
self.assertEqual(dataset.x_aux_train.shape[0], 9)
self.assertEqual(dataset.x_aux_test.shape[0], 3)
def test_get_attack_femnist(self):
attack_config = {
'backdoor_feature_aux_train': [568, 3934, 12336, 30560, 33105, 33615, 33907, 36848, 41706],
            # Racing cars with stripes in the background
'backdoor_feature_aux_test': [330, 30696, 40713],
'backdoor_feature_target': 2,
'backdoor_feature_remove_malicious': False,
'backdoor_feature_augment_times': 200,
'backdoor_feature_benign_regular': []
}
dataset = self.load_dataset('femnist', 'nonIID', attack_type=Attack.BACKDOOR, other=attack_config)
self.assertEqual(dataset.x_aux_train.shape[0], 9)
self.assertEqual(dataset.x_aux_test.shape[0], 3)
def load_dataset(self, dataset, data_distribution, attack_type=Attack.TARGETED, num_clients=10, number_of_samples=-1, other={}):
config = {
'dataset': dataset,
'number_of_samples': number_of_samples,
'data_distribution': data_distribution,
'num_clients': num_clients,
'attack_type': attack_type
}
config = {**config, **other}
malicious_clients = np.repeat(False, repeats=[num_clients])
malicious_clients[0] = True
dataset = data_loader.load_global_dataset(config, malicious_clients)
self.assertEqual(len(dataset.y_train), num_clients)
self.assertTrue(data_distribution == "IID" or isinstance(dataset, NonIIDGlobalDataset))
return dataset
if __name__ == '__main__':
tf.test.main()
| 3,093 | 37.197531 | 132 |
py
|
fl-analysis
|
fl-analysis-master/src/test/TfDataTest.py
|
import tensorflow as tf
import numpy as np
from src.data.tf_data import ImageGeneratorDataset, Dataset
class TfDataTest(tf.test.TestCase):
def setUp(self):
super(TfDataTest, self).setUp()
def tearDown(self):
pass
def get_dataset(self, aux_size):
(x_train, y_train), (x_test, y_test) = Dataset.get_cifar10_dataset(100)
inds = np.random.choice(x_train.shape[0], aux_size, replace=False)
x_aux, y_aux = x_train[inds, :], y_train[inds]
        x_train = np.delete(x_train, inds, axis=0)
        y_train = np.delete(y_train, inds, axis=0)
dataset = ImageGeneratorDataset(x_train, y_train, 50, x_test, y_test)
dataset.x_aux = x_aux
dataset.y_aux = y_aux
dataset.mal_aux_labels = np.repeat(0, y_aux.shape[0]).astype(np.uint8)
return dataset
def test_extends_normal(self):
samples = list(self.get_dataset(10).get_data_with_aux(10, 10))
self.assertEqual(len(samples), 10)
def test_extends_smallerbatch(self):
samples = list(self.get_dataset(5).get_data_with_aux(10, 10))
self.assertEqual(len(samples), 10)
self.assertTrue(np.all(samples[0][1][:10] == 0))
for x, y in samples:
print(y)
for x, y in self.get_dataset(5).get_data_with_aux(10, 10):
print(y)
if __name__ == '__main__':
tf.test.main()
| 1,358 | 32.146341 | 79 |
py
|
fl-analysis
|
fl-analysis-master/src/test/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/subspace/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/subspace/general/tfutil.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import numpy as np
from orderedset import OrderedSet
import tensorflow as tf
########################
# Logging helpers
########################
def get_ptt_names(name):
'''Gets param/train/test summary names. Just converts, e.g.,
foo -> foo, foo__train, foo__test
scope/foo -> scope/foo, scope/foo__train, scope/foo__test
'''
splits = tuple(name.rsplit('/',1))
if len(splits) == 1:
return '%s' % name, '%s__train' % name, '%s__test' % name
else:
# len 2
return '%s/%s' % splits, '%s/%s__train' % splits, '%s/%s__test' % splits
def normalize_name(name):
'''Returns a normalized form of name, replacing : with _'''
return name.replace(':', '_')
def hist_summaries(*args, **kwargs):
'''Add tf.histogram_summary for each variable in variables'''
for var in args:
hist_summary(var, **kwargs)
def hist_summaries_param(*args, **kwargs):
kwargs['param'] = True
hist_summaries(*args, **kwargs)
def hist_summaries_traintest(*args, **kwargs):
kwargs['traintest'] = True
hist_summaries(*args, **kwargs)
def hist_summaries_train(*args, **kwargs):
kwargs['train'] = True
hist_summaries(*args, **kwargs)
def hist_summaries_test(*args, **kwargs):
kwargs['test'] = True
hist_summaries(*args, **kwargs)
def hist_summary(var, name=None, traintest=False, param=False, train=False, test=False, orig_collection='orig_histogram'):
assert sum([int(v) for v in (traintest, param, train, test)]) == 1, 'exactly one of {traintest,train,test,param} should be true'
if name is None:
name = var.name
param_name,train_name,test_name = get_ptt_names(name)
if traintest:
train = True
test = True
if param:
tf.compat.v1.summary.histogram(normalize_name(param_name), var, collections=['param_collection', orig_collection])
#print 'Adding summary.histogram for %s in collections %s, %s' % (var, 'param_collection', orig_collection)
if train:
tf.compat.v1.summary.histogram(normalize_name(train_name), var, collections=['train_collection', orig_collection])
#print 'Adding summary.histogram for %s in collections %s, %s' % (var, 'train_collection', orig_collection)
if test:
tf.compat.v1.summary.histogram(normalize_name(test_name), var, collections=['test_collection', orig_collection])
#print 'Adding summary.histograms for %s in collections %s, %s' % (var, 'test_collection', orig_collection)
def scalar_summaries(*args, **kwargs):
'''Add tf.summary.scalar for each variable in variables'''
for var in args:
scalar_summary(var, **kwargs)
def scalar_summaries_param(*args, **kwargs):
kwargs['param'] = True
scalar_summaries(*args, **kwargs)
def scalar_summaries_traintest(*args, **kwargs):
kwargs['traintest'] = True
scalar_summaries(*args, **kwargs)
def scalar_summary(var, name=None, traintest=False, param=False, also_hist=False, orig_collection='orig_scalar'):
'''Add tf.summary.scalar for each variable in variables'''
assert traintest or param, 'one should be true'
if name is None:
name = var.name
param_name, train_name, test_name = get_ptt_names(name)
if param:
tf.compat.v1.summary.scalar(normalize_name(param_name), var, collections=['param_collection', orig_collection])
#print 'Adding summary.scalar for %s in collections %s, %s' % (var, 'param_collection', orig_collection)
if traintest:
tf.compat.v1.summary.scalar(normalize_name(train_name), var, collections=['train_collection', orig_collection])
#print 'Adding summary.scalar for %s in collections %s, %s' % (var, 'train_collection', orig_collection)
tf.compat.v1.summary.scalar(normalize_name(test_name), var, collections=['test_collection', orig_collection])
#print 'Adding summary.scalar for %s in collections %s, %s' % (var, 'test_collection', orig_collection)
if also_hist:
# HACK: also add hist summary for scalars to show them also on the
# Histogram pane. Need to provide a unique name so the histogram
# summary doesn't conflict with the scalar summary
hist_summary(var, name=normalize_name(name + '_(scalar)'), traintest=traintest, param=param, orig_collection=orig_collection)
def log_scalars(writer, iters, scalars_dict, prefix=None):
'''Manually log scalar values. Use like this:
log_scalars(writer, iters, {'test_loss': mean_test_loss,
'test_loss_spring': mean_test_loss_spring,
'test_loss_cross_ent': mean_test_loss_cross_ent,
'test_accuracy': mean_test_acc})
'''
if not prefix:
prefix = ''
if len(prefix) > 0 and not prefix.endswith('/'):
prefix = prefix + '/'
for key, val in scalars_dict.items():
if hasattr(val, 'dtype'):
            val = val.item()  # Convert, e.g., numpy.float32 -> float
writer.add_summary(tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag='%s%s' % (prefix, key), simple_value=val)]).SerializeToString(), iters)
def image_summaries(*args, **kwargs):
'''Add tf.image_summary for each variable in variables'''
for var in args:
image_summary(var, **kwargs)
def image_summaries_param(*args, **kwargs):
kwargs['param'] = True
image_summaries(*args, **kwargs)
def image_summaries_traintest(*args, **kwargs):
kwargs['traintest'] = True
image_summaries(*args, **kwargs)
def image_summaries_train(*args, **kwargs):
kwargs['train'] = True
image_summaries(*args, **kwargs)
def image_summaries_test(*args, **kwargs):
kwargs['test'] = True
image_summaries(*args, **kwargs)
def image_summary(var, name=None, traintest=False, param=False, train=False, test=False, orig_collection='orig_image'):
assert sum([int(v) for v in (traintest, param, train, test)]) == 1, 'exactly one of {traintest,train,test,param} should be true'
if name is None:
name = var.name
param_name,train_name,test_name = get_ptt_names(name)
if traintest:
train = True
test = True
if param:
        tf.compat.v1.summary.image(normalize_name(param_name), var, collections=['param_collection', orig_collection])
#print 'Adding image_summary for %s in collections %s, %s' % (var, 'param_collection', orig_collection)
if train:
        tf.compat.v1.summary.image(normalize_name(train_name), var, collections=['train_collection', orig_collection])
#print 'Adding image_summary for %s in collections %s, %s' % (var, 'train_collection', orig_collection)
if test:
        tf.compat.v1.summary.image(normalize_name(test_name), var, collections=['test_collection', orig_collection])
#print 'Adding image_summary for %s in collections %s, %s' % (var, 'test_collection', orig_collection)
def add_grads_and_vars_hist_summaries(grads_and_vars):
'''Adds param summary of var and hist summaries of grad values for
the given list of (grad,var) tuples. Usually these tuples will
come from the optimizer, e.g. via:
grads_and_vars = opt_rest.compute_gradients(model.v.loss, model.trainable_weights())
'''
for grad, var in grads_and_vars:
if grad is None:
continue
grad_name = '%s__grad' % var.name
hist_summaries_train(grad, name=grad_name)
hist_summaries_param(var)
def add_grad_summaries(grads_and_vars, add_summaries_train=True, quiet=False):
# Add summary nodes for grad values and prints a summary as well
if not quiet:
print('\nGrads:')
for grad, var in grads_and_vars:
if grad is None:
continue # skip grads that are None (corner case: not computed because model.loss has no dependence?)
grad_name = '%s/%s__grad' % tuple(var.name.rsplit('/', 1))
if not quiet:
print(' ', grad_name, grad)
if add_summaries_train:
hist_summaries_train(grad, name=grad_name)
if not quiet:
print()
########################
# TF operation helpers
########################
def hacked_tf_one_hot(indices, depth, on_value, off_value, name=None):
'''Emulates new tf.one_hot in master.
# Real signature: tf.one_hot(indices, depth, on_value, off_value, axis=None, name=None)
# Assumed signature: tf.one_hot(indices, depth, on_value, off_value, axis=-1, name=None)
Not needed if using newer versions of TensorFlow.
'''
N = tf.shape(input=indices)[0]
range_Nx1 = tf.expand_dims(tf.cast(tf.range(N), dtype=tf.int64), 1)
indices_Nx1 = tf.expand_dims(indices, 1)
    concat = tf.concat([range_Nx1, indices_Nx1], 1)
as_dense = tf.compat.v1.sparse_to_dense(concat,
                                  tf.cast(tf.stack([N, depth]), dtype=tf.int64),  # Assumption: axis=-1
on_value, off_value)
one_hot = tf.reshape(as_dense, (-1, depth), name=name)
return one_hot
def hacked_tf_nn_softmax(logits, name=None):
'''Like tf.nn.softmax but casts to float64 first as a workaround for this bug:
https://github.com/tensorflow/tensorflow/issues/4425
'''
logits_64 = tf.cast(logits, tf.float64)
out_64 = tf.nn.softmax(logits_64)
out_32 = tf.cast(out_64, tf.float32, name=name)
return out_32
def smooth_l1(x, name=None):
'''Pointwise smooth abs function'''
absx = tf.abs(x)
big = tf.cast(tf.greater(absx, tf.ones_like(absx)), tf.float32)
    activation = tf.add(tf.multiply(big, absx - .5), tf.multiply((1 - big), .5 * tf.square(x)), name=name)
return activation
########################
# Misc helpers
########################
def sess_run_dict(sess, fetch_names, fetch_vars=None, feed_dict=None, options=None, run_metadata=None, **other_kwargs):
'''
Like sess.run but returns a dictionary of results
Usage:
sess_run_dict(sess, fetch_names, fetch_vars, ...)
sess_run_dict(sess, fetch_dict, ...)
'''
dict_mode = isinstance(fetch_names, dict)
if dict_mode:
assert fetch_vars is None, 'provide either dict or list of names and vars, not both'
fetch_dict = fetch_names
fetch_names = list(fetch_dict.keys())
fetch_vars = list(fetch_dict.values())
assert len(fetch_names) == len(fetch_vars), 'length of fetch_names must match length of fetch_vars'
assert isinstance(fetch_vars, list) or isinstance(fetch_vars, tuple), 'fetch_vars should be list or tuple'
result = sess.run(fetch_vars, feed_dict=feed_dict, options=options, run_metadata=run_metadata, **other_kwargs)
ret = {k:v for k,v in zip(fetch_names, result)}
return ret
def get_collection_intersection(*args):
ret = []
for ii,arg in enumerate(args):
if ii == 0:
ret = OrderedSet(tf.compat.v1.get_collection(arg))
else:
ret = ret.intersection(OrderedSet(tf.compat.v1.get_collection(arg)))
return list(ret)
def get_collection_intersection_summary(*args):
'''Returns a tf.merge_summary of the given collection intersection, or None if the intersection is empty.'''
col_int = get_collection_intersection(*args)
if col_int:
return tf.compat.v1.summary.merge(col_int)
def summarize_weights(weights, sess=None):
'''Print summary of each weight tensor in a list of weight tensors.
Example usage:
summarize_weights(model.trainable_weights)
if sess is provided, also print weight min, max, and RMS
'''
if sess:
vals = sess.run(weights)
total_params = 0
titl = ' %50s: %10s %-20s' % ('NAME', 'SIZE', 'SHAPE')
if sess:
titl += ' %10s, %10s, %10s' % ('MIN', 'MAX', 'RMS')
print(titl)
for ii,var in enumerate(weights):
st = ' %50s: %10d %-20s' % (var.name, np.prod(var.get_shape().as_list()), var.get_shape().as_list())
if sess:
val = vals[ii]
st += ' %10s, %10s, %10s' % ('%.3g' % val.min(), '%.3g' % val.max(), '%.3g' % np.sqrt((val**2).mean()))
print(st)
total_params += np.prod(var.get_shape().as_list())
print(' %50s: %10d' % ('Total', total_params))
return total_params
def val_or_dynamic(vord):
return '<dynamic>' if isinstance(vord, tf.Tensor) else repr(vord)
def summarize_opt(opt):
print('Optimizer:')
print(' ', opt)
if isinstance(opt, tf.compat.v1.train.MomentumOptimizer):
print(' LR: %s, momentum: %g, use_nesterov: %s' % (val_or_dynamic(opt._learning_rate), opt._momentum, opt._use_nesterov))
elif isinstance(opt, tf.compat.v1.train.RMSPropOptimizer):
print(' LR: %s, momentum: %g, decay: %g, epsilon: %g' % (val_or_dynamic(opt._learning_rate), opt._momentum, opt._decay, opt._epsilon))
elif isinstance(opt, tf.compat.v1.train.AdamOptimizer):
print(' LR: %s, beta1: %g, beta2: %g, epsilon: %g' % (val_or_dynamic(opt._lr), opt._beta1, opt._beta2, opt._epsilon))
else:
print(' (cannot summarize unknown type of optimizer)')
def tf_assert_gpu(sess):
with tf.device('/gpu:0'):
foo = tf.compat.v1.placeholder(tf.float32, name='assert_gpu')
bar = tf.add(foo, 1, name='assert_gpu')
try:
sess.run(bar, {foo: 1})
except:
print('\n\n\ntf_assert_gpu: no GPU is present! In case it helps, CUDA_VISIBLE_DEVICES is %s' % repr(os.environ.get('CUDA_VISIBLE_DEVICES', None)))
print('See error below:\n\n\n')
raise
def tf_assert_all_init(sess):
uninit_vars = sess.run(tf.compat.v1.report_uninitialized_variables())
assert len(uninit_vars) == 0, 'Expected all variables to have been initialized, but these have not been: %s' % uninit_vars
def tf_get_uninitialized_variables(sess):
'''A bit of a hack from
https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables
to get a list of all uninitialized Variable objects from the
graph
'''
uninitialized_vars = []
for var in tf.compat.v1.global_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
return uninitialized_vars
| 15,340 | 39.265092 | 157 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/general/image_preproc.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
class ImagePreproc(object):
'''Class to handle common image preprocessing (center crops or
random crops with random flips).
'''
def __init__(self):
self.buf = None
def get_buffer(self, shape, dtype):
if self.buf is None or self.buf.shape != shape or self.buf.dtype != dtype:
print('ImagePreproc: creating new buffer')
self.buf = np.zeros(shape, dtype)
return self.buf
def center_crops(self, dat, crop_size):
'''Returns the center crops.
dat: (b, 0, 1, c)
crop_size: e.g. (227,227)
'''
nims = dat.shape[0]
#nch = 3
nch = dat.shape[-1]
ret_shape = (nims, crop_size[0], crop_size[1], nch)
ret = self.get_buffer(ret_shape, dtype=dat.dtype) # Reuse buffer if possible
        off0 = (dat.shape[1] - crop_size[0]) // 2
        off1 = (dat.shape[2] - crop_size[1]) // 2
ret = dat[:, off0:off0+crop_size[0], off1:off1+crop_size[1], :]
return ret
def random_crops(self, dat, crop_size, mirror=True):
'''Returns random crops of the given size
dat: (b, 0, 1, c)
crop_size: e.g. (227,227)
'''
nims = dat.shape[0]
#nch = 3
nch = dat.shape[-1]
ret_shape = (nims, crop_size[0], crop_size[1], nch)
ret = self.get_buffer(ret_shape, dtype=dat.dtype) # Reuse buffer if possible
maxoff0 = dat.shape[1]-crop_size[0]
maxoff1 = dat.shape[2]-crop_size[1]
off0s = np.random.randint(0,maxoff0,nims)
off1s = np.random.randint(0,maxoff1,nims)
domirror = np.random.randint(0,2,nims)
for ii in range(nims):
off0 = off0s[ii]
off1 = off1s[ii]
if mirror and domirror[ii] == 0:
ret[ii] = dat[ii, off0:off0+crop_size[0], off1:off1+crop_size[1], :][:,::-1] # reverse column dimension
else:
ret[ii] = dat[ii, off0:off0+crop_size[0], off1:off1+crop_size[1], :]
return ret
def color_normalize(self, dat, mean, std):
'''normalize each color channel with provided mean and std'''
nims = dat.shape[0]
nch = 3
ret_shape = (nims, dat.shape[1], dat.shape[2], nch)
ret = self.get_buffer(ret_shape, dtype=dat.dtype) # Reuse buffer if possible
for ii in range(nch):
ret[:,:,:,ii] = (dat[:,:,:,ii] - mean[ii]) / std[ii]
return ret
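# Minimal usage sketch (illustrative only; crop size and normalization statistics below are made up):
#   preproc = ImagePreproc()
#   crops = preproc.random_crops(batch, (24, 24))   # random 24x24 crops with random horizontal flips
#   normed = preproc.color_normalize(crops, mean=(0.49, 0.48, 0.45), std=(0.25, 0.24, 0.26))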
| 3,582 | 37.945652 | 122 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/general/util.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import errno
import time
class DotDict(dict):
"""
Example:
mm = DotDict({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(DotDict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
if not attr in self:
raise AttributeError(attr)
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(DotDict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, attr):
if not attr in self:
raise AttributeError(attr)
self.__delitem__(attr)
def __delitem__(self, key):
super(DotDict, self).__delitem__(key)
del self.__dict__[key]
def __repr__(self):
dict_rep = super(DotDict, self).__repr__()
return 'DotDict(%s)' % dict_rep
class WithTimer(object):
def __init__(self, title = '', quiet = False):
self.title = title
self.quiet = quiet
def elapsed(self):
        return time.time() - self.wall, time.process_time() - self.proc
def enter(self):
'''Manually trigger enter'''
self.__enter__()
def __enter__(self):
        self.proc = time.process_time()
self.wall = time.time()
return self
def __exit__(self, *args):
if not self.quiet:
titlestr = (' ' + self.title) if self.title else ''
print('Elapsed%s: wall: %.06f, sys: %.06f' % ((titlestr,) + self.elapsed()))
class TicToc(object):
def __init__(self):
self.reset()
def reset(self):
        self._proc = time.process_time()
self._wall = time.time()
def elapsed(self):
return self.wall(), self.proc()
def wall(self):
return time.time() - self._wall
def proc(self):
        return time.process_time() - self._proc
globalTicToc = TicToc()
globalTicToc2 = TicToc()
globalTicToc3 = TicToc()
def tic():
'''Like Matlab tic/toc for wall time and processor time'''
globalTicToc.reset()
def toc():
'''Like Matlab tic/toc for wall time'''
return globalTicToc.wall()
def tocproc():
'''Like Matlab tic/toc, but for processor time'''
return globalTicToc.proc()
def tic2():
globalTicToc2.reset()
def toc2():
return globalTicToc2.wall()
def tocproc2():
return globalTicToc2.proc()
def tic3():
globalTicToc3.reset()
def toc3():
return globalTicToc3.wall()
def tocproc3():
return globalTicToc3.proc()
def mkdir_p(path):
# From https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 4,248 | 27.709459 | 88 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/general/__init__.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 1,104 | 51.619048 | 80 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/general/stats_buddy.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import bisect
from colorama import Style
import numpy as np
import time
class StatsBuddy(object):
'''Your training stat collecting buddy!
epochs: epoch E is after seeing and updating from the entire training set E times.
train_iter: train iter T is after seeing and updating from T train mini-batches.
Both start at 0, which implies zero training.'''
def __init__(self, pretty_replaces=None, default_pretty_replaces=True):
self._epoch = 0
self._train_iter = 0
self._wall = None
self._wall_total = 0 # Saved time, perhaps from other runs
self._pretty_replaces = []
if default_pretty_replaces:
self._pretty_replaces.extend([
('train_', ''),
('val_', ''),
('test_', ''),
('loss', 'l'),
('accuracy', 'acc'),
('cross_entropy','xe'),
('cross_ent','xe'),
('euclidean','euc'),
(':0',''),
])
if pretty_replaces:
self._pretty_replaces.extend(pretty_replaces)
# Data is stored as dict of four lists: [epoch_list, train_iter_list, weight_list, value_list]
# Each of the four lists is the same length.
self._data = {}
def _get_fetch_kwargs(self, raise_on_empty=True, empty_val=-123, assert_sync_epoch=True):
return raise_on_empty, empty_val, assert_sync_epoch
@property
def epoch(self):
return self._epoch
@property
def train_iter(self):
return self._train_iter
def tic(self):
# Just need to call once on model creation.
# Must also be called when loading a saved StatsBuddy from disk and resuming run!
self._wall = time.time()
def toc(self):
assert self._wall, 'toc called without tic'
elapsed = time.time() - self._wall
self._wall_total += elapsed
self._wall = time.time()
return self._wall_total
def inc_epoch(self):
self._epoch += 1
def inc_train_iter(self):
self._train_iter += 1
def note(self, **kwargs):
'''Main stat collection function. See below for methods providing various syntactic sugar.'''
weight = kwargs['_weight'] if '_weight' in kwargs else 1.0
for key in sorted(kwargs.keys()):
if key == '_weight':
continue
value = kwargs[key]
#print key, value
self.note_one(key, value, weight=weight)
def note_weighted(self, _weight, **kwargs):
'''Convenience function to call note with explicit weight.'''
assert '_weight' not in kwargs, 'Provided weight twice (via positional arg and kwarg)'
self.note(_weight=_weight, **kwargs)
def note_weighted_list(self, _weight, name_list, value_list, prefix='', suffix=''):
'''Convenience function to call note with explicit weight and
list of names and values. Prefix and/or suffix, if given, are
        concatenated to the beginning and/or end of each name.
'''
assert len(name_list) == len(value_list), 'length mismatch'
for name,value in zip(name_list, value_list):
final_name = prefix + name + suffix
self.note_one(final_name, value, weight=_weight)
def note_list(self, name_list, value_list, prefix='', suffix=''):
        '''Convenience function to call note_weighted_list with a
weight of 1.0
'''
self.note_weighted_list(1.0, name_list, value_list, prefix=prefix, suffix=suffix)
def note_one(self, key, value, weight=1.0):
epoch_list, train_iter_list, weight_list, value_list = self._data.setdefault(key, [[], [], [], []])
epoch_list.append(self.epoch)
train_iter_list.append(self.train_iter)
weight_list.append(weight)
value_list.append(value)
#print 'Noted: %20s, e: %d, ti: %d, w: %g, v: %g' % (key, self.epoch, self.train_iter, weight, value)
def last(self, *args, **kwargs):
'''Get last values as list'''
raise_on_empty, empty_val, assert_sync_epoch = self._get_fetch_kwargs(**kwargs)
last_as_dict = self.last_as_dict(*args, raise_on_empty=raise_on_empty, empty_val=empty_val)
return [last_as_dict[key] for key in args]
def last_as_dict(self, *args, **kwargs):
'''Get last values as dict. Not guaranteed for each value to be at the same epoch or training iteration!'''
raise_on_empty, empty_val, assert_sync_epoch = self._get_fetch_kwargs(**kwargs)
ret = {}
for key in args:
epoch_list, train_iter_list, weight_list, value_list = self._data.setdefault(key, [[], [], [], []])
if value_list:
ret[key] = value_list[-1]
else:
if raise_on_empty:
raise Exception('No value for %s yet recorded' % key)
else:
ret[key] = empty_val
return ret
def last_list_re(self, regex, **kwargs):
ret = []
for key in sorted(self._data.keys()):
if re.search(regex, key):
ret.append((key, self.last(key, **kwargs)[0]))
return ret
def last_pretty_re(self, regex, style='', **kwargs):
keys_values = self.last_list_re(regex, **kwargs)
return self._summary_pretty_re(keys_values, style=style)
def epoch_mean(self, *args, **kwargs):
raise_on_empty, empty_val, assert_sync_epoch = self._get_fetch_kwargs(**kwargs)
means_as_dict = self.epoch_mean_as_dict(*args, raise_on_empty=raise_on_empty, empty_val=empty_val)
return [means_as_dict[key] for key in args]
def epoch_mean_as_dict(self, *args, **kwargs):
'''Get mean of each field over most recently recorded epoch,
as dict. Not guaranteed to be the same epoch for each value
unless assert_sync_epoch is True.
'''
raise_on_empty, empty_val, assert_sync_epoch = self._get_fetch_kwargs(**kwargs)
ret = {}
ep = None
for key in args:
epoch_list, train_iter_list, weight_list, value_list = self._data.setdefault(key, [[], [], [], []])
if value_list:
if ep is None:
ep = epoch_list[-1]
if assert_sync_epoch:
assert ep == epoch_list[-1], 'Epoch mismatch between requested epoch means'
else:
ep = epoch_list[-1]
ep_end = len(epoch_list)
ep_begin = bisect.bisect_left(epoch_list, ep)
#print 'Taking epoch mean over %d records' % (ep_end - ep_begin)
assert ep_begin != ep_end, 'Logic error with bisect_left or data insertion order.'
values = np.array(value_list[ep_begin:ep_end])
weights = np.array(weight_list[ep_begin:ep_end])
# remove nan from `values` and `weights` array
valid_ids = np.where(~np.isnan(values))[0]
values = values[valid_ids]
weights = weights[valid_ids]
if len(valid_ids) == 0:
ret[key] = np.nan
else:
weights = weights / float(max(1e-6, weights.sum()))
assert len(values.shape) == 1, 'expected vector'
assert len(weights.shape) == 1, 'expected vector'
ret[key] = np.dot(values, weights)
else:
if raise_on_empty:
raise Exception('No value for %s yet recorded' % key)
else:
ret[key] = empty_val
return ret
def epoch_mean_summary_re(self, regex, **kwargs):
return ', '.join(self.epoch_mean_list_re(regex, **kwargs))
def epoch_mean_pretty_re(self, regex, style='', **kwargs):
keys_values = self.epoch_mean_list_re(regex, **kwargs)
return self._summary_pretty_re(keys_values, style=style)
def epoch_mean_list_re(self, regex, **kwargs):
ret = []
for key in sorted(self._data.keys()):
if re.search(regex, key):
ret.append((key, self.epoch_mean(key, **kwargs)[0]))
return ret
def epoch_mean_list_all(self, **kwargs):
ret = []
for key in sorted(self._data.keys()):
ret.append((key, self.epoch_mean(key, **kwargs)[0]))
return ret
def _summary_pretty_re(self, keys_values, style=''):
'''Produce a short, printable summary. Strips "train_" and "test_" strings assuming they will be printed elsewhere.'''
ret = []
losses_seen = 0
for key, value in keys_values:
short = key
for orig,new in self._pretty_replaces:
short = short.replace(orig, new)
tup = (short, value)
if key in ('loss', 'train_loss', 'val_loss', 'test_loss'):
ret.insert(0, tup)
losses_seen += 1
elif 'loss' in key:
ret.insert(losses_seen, tup)
losses_seen += 1
else:
ret.append(tup)
if style:
return ', '.join(['%s: %s%7s%s' % (tt[0], style, '%.4f' % tt[1], Style.RESET_ALL) for tt in ret])
else:
return ', '.join(['%s: %.4f' % (tt[0], tt[1]) for tt in ret])
def data_per_iter(self):
ret = {}
for key in list(self._data.keys()):
ret[key] = {}
ret[key]['iter'] = np.array(self._data[key][1])
ret[key]['val'] = np.array(self._data[key][3])
return ret
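# The sketch below is a hypothetical usage example, not part of the original module:
# it shows the intended flow of the buddy (tic() once, note per-batch stats weighted
# by the batch size, bump the counters, then print a weighted per-epoch summary).
# The metric values and the batch size of 128 are synthetic placeholders.
def _stats_buddy_example_sketch():
    '''Illustrative only; nothing in this module calls it.'''
    buddy = StatsBuddy()
    buddy.tic()
    for _epoch in range(2):
        for _batch in range(5):
            # Weight by the (hypothetical) batch size so epoch_mean() computes a
            # properly weighted average over the epoch.
            buddy.note_weighted(128,
                                train_loss=float(np.random.rand()),
                                train_accuracy=float(np.random.rand()))
            buddy.inc_train_iter()
        print('epoch %d (%.1fs elapsed): %s'
              % (buddy.epoch, buddy.toc(), buddy.epoch_mean_pretty_re('^train_')))
        buddy.inc_epoch()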
| 10,845 | 41.03876 | 126 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/builder/resnet.py
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (Flatten, Input, Activation,
Reshape, Dropout, Convolution2D,
MaxPooling2D, BatchNormalization,
Conv2D, GlobalAveragePooling2D,
Concatenate, AveragePooling2D,
LocallyConnected2D, Dense)
# from general.tfutil import hist_summaries_traintest, scalar_summaries_traintest
from src.subspace.builder.model_builders import make_and_add_losses
from src.subspace.keras_ext.engine import ExtendedModel
from src.subspace.keras_ext.layers import (RProjDense,
RProjConv2D,
RProjBatchNormalization,
RProjLocallyConnected2D)
from src.subspace.keras_ext.rproj_layers_util import (OffsetCreatorDenseProj,
OffsetCreatorSparseProj,
OffsetCreatorFastfoodProj,
FastWalshHadamardProjector,
ThetaPrime, MultiplyLayer)
from src.subspace.keras_ext.util import make_image_input_preproc
from tensorflow.keras.regularizers import l2
def resnet_layer(inputs,
offset_creator_class,
vv,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True,
kernel_regularizer=l2(1e-4),
name=None):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string|None): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = RProjConv2D(offset_creator_class, vv,
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
activation=None,
kernel_regularizer=kernel_regularizer,
name=name)
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = RProjBatchNormalization(offset_creator_class, vv)(x)
# x = BatchNormalization()(x) # does this even make sense
if activation is not None:
x = Activation(activation)(x)
else:
pass
# if batch_normalization:
# x = BatchNormalization()(x)
# if activation is not None:
# x = Activation(activation)(x)
# x = conv(x)
return x
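# A minimal sketch (illustrative only, with made-up sizes) of how resnet_layer is
# meant to be called: every block receives the same ThetaPrime instance, so the
# whole stack is trained through one shared low-dimensional vector rather than
# through the native kernel/BN parameters.
def _resnet_layer_example_sketch():
    '''Illustrative only; nothing in this module calls it.'''
    images = Input(shape=(32, 32, 3))
    vv = ThetaPrime(200)  # hypothetical subspace dimension
    x = resnet_layer(images, OffsetCreatorSparseProj, vv, num_filters=16)
    return Model(inputs=images, outputs=x)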
def build_LeNet_resnet(depth, weight_decay=0, vsize=100, shift_in=None, proj_type='sparse', disable_bn=False):
im_shape = (32, 32, 3)
n_label_vals = 10
im_dtype = 'float32'
batch_norm_enabled = not disable_bn
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
num_res_blocks = int((depth - 2) / 6)
assert proj_type in ('dense', 'sparse')
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
num_filters = 16
x = resnet_layer(preproc_images, offset_creator_class, vv,
num_filters=num_filters,
batch_normalization=batch_norm_enabled)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(x,
offset_creator_class,
vv,
num_filters=num_filters,
strides=strides,
name=f"Conv2D_stack{stack}_res{res_block}_l0",
batch_normalization=batch_norm_enabled)
y = resnet_layer(y,
offset_creator_class,
vv,
num_filters=num_filters,
activation=None,
name=f"Conv2D_stack{stack}_res{res_block}_l1",
batch_normalization=batch_norm_enabled)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(x,
offset_creator_class,
vv,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False,
name=f"Conv2D_stack{stack}_res{res_block}_l2")
x = tf.keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
logits = RProjDense(offset_creator_class, vv, n_label_vals,
activation='softmax',
kernel_initializer='he_normal')(y)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
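# A minimal construction sketch (illustrative only). depth must satisfy 6n+2
# (20, 32, 44, ...), vsize is the dimension of the shared subspace vector, and
# proj_type selects the dense or sparse random projection from rproj_layers_util.
def _build_LeNet_resnet_example_sketch():
    '''Illustrative only; nothing in this module calls it.'''
    model = build_LeNet_resnet(depth=20, vsize=1000, proj_type='sparse')
    model.summary()
    return model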
def resnet_layer_ff(inputs,
conv2d_class,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True,
kernel_regularizer=l2(1e-4),
name=None):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string|None): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = conv2d_class(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
activation=None,
kernel_regularizer=kernel_regularizer,
name=name)
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x) # does this even make sense
if activation is not None:
x = Activation(activation)(x)
else:
pass
# if batch_normalization:
# x = BatchNormalization()(x)
# if activation is not None:
# x = Activation(activation)(x)
# x = conv(x)
return x
def build_resnet_fastfood(depth, weight_decay=0, vsize=100, shift_in=None, proj_type='sparse', DD=None):
im_shape = (32, 32, 3)
n_label_vals = 10
im_dtype = 'float32'
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
num_res_blocks = int((depth - 2) / 6)
assert proj_type in ('dense', 'sparse')
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
def define_model(input_images, DenseLayer, ConvLayer):
vv = ThetaPrime(vsize)
num_filters = 16
x = resnet_layer_ff(preproc_images, ConvLayer, num_filters=num_filters)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer_ff(x,
ConvLayer,
num_filters=num_filters,
strides=strides,
name=f"Conv2D_stack{stack}_res{res_block}_l0")
y = resnet_layer_ff(y,
ConvLayer,
num_filters=num_filters,
activation=None,
name=f"Conv2D_stack{stack}_res{res_block}_l1")
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer_ff(x,
ConvLayer,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False,
name=f"Conv2D_stack{stack}_res{res_block}_l2")
x = tf.keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
logits = DenseLayer(n_label_vals,
activation='softmax',
kernel_initializer='he_normal')(y)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
return model
if not DD:
with tf.name_scope('net_disposable'):
# Make disposable direct model
model_disposable = define_model(input_images, Dense, Conv2D)
DD = np.sum([np.prod(var.get_shape().as_list()) for var in model_disposable.trainable_weights]).item()
print(f"D {DD} {type(DD)}")
del model_disposable
with tf.name_scope('net'):
# Make real RProj FWH model
fwh_projector = FastWalshHadamardProjector(vsize, DD)
DenseLayer = lambda *args, **kwargs: RProjDense(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs)
Conv2DLayer = lambda *args, **kwargs: RProjConv2D(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs)
model = define_model(input_images, DenseLayer, Conv2DLayer)
fwh_projector.check_usage()
for ww in fwh_projector.trainable_weights:
model.add_extra_trainable_weight(ww)
for ww in fwh_projector.non_trainable_weights:
model.add_extra_non_trainable_weight(ww)
    nontrackable_fields = ['input_images', 'preproc_images', 'input_labels']  # 'logits' is local to define_model, so it is not visible here
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
# Model: "model"
# __________________________________________________________________________________________________
# Layer (type) Output Shape Param # Connected to
# ==================================================================================================
# input_1 (InputLayer) [(None, 32, 32, 3)] 0
# __________________________________________________________________________________________________
# conv2d (Conv2D) (None, 32, 32, 16) 448 input_1[0][0]
# __________________________________________________________________________________________________
# batch_normalization (BatchNorma (None, 32, 32, 16) 64 conv2d[0][0]
# __________________________________________________________________________________________________
# activation (Activation) (None, 32, 32, 16) 0 batch_normalization[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res0_l0 (Conv2D) (None, 32, 32, 16) 2320 activation[0][0]
# __________________________________________________________________________________________________
# batch_normalization_1 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res0_l0[0][0]
# __________________________________________________________________________________________________
# activation_1 (Activation) (None, 32, 32, 16) 0 batch_normalization_1[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res0_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_1[0][0]
# __________________________________________________________________________________________________
# batch_normalization_2 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res0_l1[0][0]
# __________________________________________________________________________________________________
# add (Add) (None, 32, 32, 16) 0 activation[0][0]
# batch_normalization_2[0][0]
# __________________________________________________________________________________________________
# activation_2 (Activation) (None, 32, 32, 16) 0 add[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res1_l0 (Conv2D) (None, 32, 32, 16) 2320 activation_2[0][0]
# __________________________________________________________________________________________________
# batch_normalization_3 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res1_l0[0][0]
# __________________________________________________________________________________________________
# activation_3 (Activation) (None, 32, 32, 16) 0 batch_normalization_3[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res1_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_3[0][0]
# __________________________________________________________________________________________________
# batch_normalization_4 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res1_l1[0][0]
# __________________________________________________________________________________________________
# add_1 (Add) (None, 32, 32, 16) 0 activation_2[0][0]
# batch_normalization_4[0][0]
# __________________________________________________________________________________________________
# activation_4 (Activation) (None, 32, 32, 16) 0 add_1[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res2_l0 (Conv2D) (None, 32, 32, 16) 2320 activation_4[0][0]
# __________________________________________________________________________________________________
# batch_normalization_5 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res2_l0[0][0]
# __________________________________________________________________________________________________
# activation_5 (Activation) (None, 32, 32, 16) 0 batch_normalization_5[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack0_res2_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_5[0][0]
# __________________________________________________________________________________________________
# batch_normalization_6 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res2_l1[0][0]
# __________________________________________________________________________________________________
# add_2 (Add) (None, 32, 32, 16) 0 activation_4[0][0]
# batch_normalization_6[0][0]
# __________________________________________________________________________________________________
# activation_6 (Activation) (None, 32, 32, 16) 0 add_2[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res0_l0 (Conv2D) (None, 16, 16, 32) 4640 activation_6[0][0]
# __________________________________________________________________________________________________
# batch_normalization_7 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res0_l0[0][0]
# __________________________________________________________________________________________________
# activation_7 (Activation) (None, 16, 16, 32) 0 batch_normalization_7[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res0_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_7[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res0_l2 (Conv2D) (None, 16, 16, 32) 544 activation_6[0][0]
# __________________________________________________________________________________________________
# batch_normalization_8 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res0_l1[0][0]
# __________________________________________________________________________________________________
# add_3 (Add) (None, 16, 16, 32) 0 Conv2D_stack1_res0_l2[0][0]
# batch_normalization_8[0][0]
# __________________________________________________________________________________________________
# activation_8 (Activation) (None, 16, 16, 32) 0 add_3[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res1_l0 (Conv2D) (None, 16, 16, 32) 9248 activation_8[0][0]
# __________________________________________________________________________________________________
# batch_normalization_9 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res1_l0[0][0]
# __________________________________________________________________________________________________
# activation_9 (Activation) (None, 16, 16, 32) 0 batch_normalization_9[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res1_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_9[0][0]
# __________________________________________________________________________________________________
# batch_normalization_10 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res1_l1[0][0]
# __________________________________________________________________________________________________
# add_4 (Add) (None, 16, 16, 32) 0 activation_8[0][0]
# batch_normalization_10[0][0]
# __________________________________________________________________________________________________
# activation_10 (Activation) (None, 16, 16, 32) 0 add_4[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res2_l0 (Conv2D) (None, 16, 16, 32) 9248 activation_10[0][0]
# __________________________________________________________________________________________________
# batch_normalization_11 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res2_l0[0][0]
# __________________________________________________________________________________________________
# activation_11 (Activation) (None, 16, 16, 32) 0 batch_normalization_11[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack1_res2_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_11[0][0]
# __________________________________________________________________________________________________
# batch_normalization_12 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res2_l1[0][0]
# __________________________________________________________________________________________________
# add_5 (Add) (None, 16, 16, 32) 0 activation_10[0][0]
# batch_normalization_12[0][0]
# __________________________________________________________________________________________________
# activation_12 (Activation) (None, 16, 16, 32) 0 add_5[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res0_l0 (Conv2D) (None, 8, 8, 64) 18496 activation_12[0][0]
# __________________________________________________________________________________________________
# batch_normalization_13 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res0_l0[0][0]
# __________________________________________________________________________________________________
# activation_13 (Activation) (None, 8, 8, 64) 0 batch_normalization_13[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res0_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_13[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res0_l2 (Conv2D) (None, 8, 8, 64) 2112 activation_12[0][0]
# __________________________________________________________________________________________________
# batch_normalization_14 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res0_l1[0][0]
# __________________________________________________________________________________________________
# add_6 (Add) (None, 8, 8, 64) 0 Conv2D_stack2_res0_l2[0][0]
# batch_normalization_14[0][0]
# __________________________________________________________________________________________________
# activation_14 (Activation) (None, 8, 8, 64) 0 add_6[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res1_l0 (Conv2D) (None, 8, 8, 64) 36928 activation_14[0][0]
# __________________________________________________________________________________________________
# batch_normalization_15 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res1_l0[0][0]
# __________________________________________________________________________________________________
# activation_15 (Activation) (None, 8, 8, 64) 0 batch_normalization_15[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res1_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_15[0][0]
# __________________________________________________________________________________________________
# batch_normalization_16 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res1_l1[0][0]
# __________________________________________________________________________________________________
# add_7 (Add) (None, 8, 8, 64) 0 activation_14[0][0]
# batch_normalization_16[0][0]
# __________________________________________________________________________________________________
# activation_16 (Activation) (None, 8, 8, 64) 0 add_7[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res2_l0 (Conv2D) (None, 8, 8, 64) 36928 activation_16[0][0]
# __________________________________________________________________________________________________
# batch_normalization_17 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res2_l0[0][0]
# __________________________________________________________________________________________________
# activation_17 (Activation) (None, 8, 8, 64) 0 batch_normalization_17[0][0]
# __________________________________________________________________________________________________
# Conv2D_stack2_res2_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_17[0][0]
# __________________________________________________________________________________________________
# batch_normalization_18 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res2_l1[0][0]
# __________________________________________________________________________________________________
# add_8 (Add) (None, 8, 8, 64) 0 activation_16[0][0]
# batch_normalization_18[0][0]
# __________________________________________________________________________________________________
# activation_18 (Activation) (None, 8, 8, 64) 0 add_8[0][0]
# __________________________________________________________________________________________________
# average_pooling2d (AveragePooli (None, 1, 1, 64) 0 activation_18[0][0]
# __________________________________________________________________________________________________
# flatten (Flatten) (None, 64) 0 average_pooling2d[0][0]
# __________________________________________________________________________________________________
# dense (Dense) (None, 10) 650 flatten[0][0]
# ==================================================================================================
# Total params: 274,442
# Trainable params: 273,066
# Non-trainable params: 1,376
# __________________________________________________________________________________________________
| 27,766 | 57.21174 | 116 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/builder/model_builders.py
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (Dense, Flatten, Input, Activation,
Reshape, Dropout, Convolution2D,
MaxPooling2D, BatchNormalization,
Conv2D, GlobalAveragePooling2D,
Concatenate, AveragePooling2D,
LocallyConnected2D)
# from general.tfutil import hist_summaries_traintest, scalar_summaries_traintest
from src.subspace.keras_ext.engine import ExtendedModel
from src.subspace.keras_ext.layers import (RProjDense,
RProjConv2D,
RProjBatchNormalization,
RProjLocallyConnected2D)
from src.subspace.keras_ext.rproj_layers_util import (OffsetCreatorDenseProj,
OffsetCreatorSparseProj,
OffsetCreatorFastfoodProj,
FastWalshHadamardProjector,
ThetaPrime, MultiplyLayer)
from src.subspace.keras_ext.util import make_image_input_preproc
from tensorflow.keras.regularizers import l2
def make_and_add_losses(model, input_labels):
'''Add classification and L2 losses'''
with tf.compat.v1.name_scope('losses') as scope:
prob = tf.nn.softmax(model.v.logits, name='prob')
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model.v.logits, labels=input_labels, name='cross_ent')
loss_cross_ent = tf.reduce_mean(input_tensor=cross_ent, name='loss_cross_ent')
model.add_trackable('loss_cross_ent', loss_cross_ent)
class_prediction = tf.argmax(input=prob, axis=1)
prediction_correct = tf.equal(class_prediction, input_labels, name='prediction_correct')
accuracy = tf.reduce_mean(input_tensor=tf.cast(prediction_correct, dtype=tf.float32), name='accuracy')
model.add_trackable('accuracy', accuracy)
# hist_summaries_traintest(prob, cross_ent)
# scalar_summaries_traintest(accuracy)
model.add_loss_reg()
if 'loss_reg' in model.v:
loss = tf.add_n((
model.v.loss_cross_ent,
model.v.loss_reg,
), name='loss')
else:
loss = model.v.loss_cross_ent
model.add_trackable('loss', loss)
nontrackable_fields = ['prob', 'cross_ent', 'class_prediction', 'prediction_correct']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
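# For reference: after make_and_add_losses(model, input_labels) returns, the model
# exposes (via model.v.<name>) the trackables 'loss_cross_ent', 'accuracy' and
# 'loss' (plus 'loss_reg' whenever a kernel_regularizer contributes a term), and
# the non-trackable tensors 'prob', 'cross_ent', 'class_prediction' and
# 'prediction_correct'. Training code around these builders is expected to fetch
# loss and accuracy through those names.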
def build_model_mnist_fc(weight_decay=0, vsize=100, depth=2, width=100, shift_in=None, proj_type='dense'):
im_shape = (28, 28, 1)
n_label_vals = 10
im_dtype = 'float32'
assert proj_type in ('dense', 'sparse')
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.compat.v1.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.compat.v1.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = input_images
xx = Flatten()(xx)
for _ in range(depth):
xx = RProjDense(offset_creator_class, vv, width, activation='relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(xx)
# xx = Dense(width, activation='relu')(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx)
# model = Model(input=input_images, output=logits)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model, vv.var_2d
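# A minimal training-step sketch (illustrative only; the data, vsize and learning
# rate below are placeholders). Unlike the other builders in this file,
# build_model_mnist_fc returns the (model, theta) pair, where theta is the shared
# (1, vsize) subspace variable that all RProj layers are trained through.
def _mnist_fc_subspace_step_sketch(x_batch, y_batch):
    '''Illustrative only; nothing in this module calls it.'''
    model, _theta = build_model_mnist_fc(vsize=200, depth=2, width=100)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
    with tf.GradientTape() as tape:
        predictions = model(x_batch, training=True)
        loss = loss_fn(y_true=y_batch, y_pred=predictions)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss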
def build_cnn_model_mnist_bhagoji(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'):
im_shape = (28, 28, 1)
n_label_vals = 10
im_dtype = 'float32'
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
# xx = MaxPooling2D((2, 2))(xx)
xx = Flatten()(xx)
xx = RProjDense(offset_creator_class, vv, 128, kernel_initializer='he_normal', activation='relu',
kernel_regularizer=l2(weight_decay))(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
def build_cnn_model_mnist_dev_conv(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'):
im_shape = (28, 28, 1)
n_label_vals = 10
im_dtype = 'float32'
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = RProjConv2D(offset_creator_class, vv, 8, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = RProjConv2D(offset_creator_class, vv, 4, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = MaxPooling2D((2, 2))(xx)
xx = Flatten()(xx)
xx = RProjDense(offset_creator_class, vv, 32, kernel_initializer='he_normal', activation='relu',
kernel_regularizer=l2(weight_decay))(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
def build_cnn_model_mnistcnn_conv(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'):
im_shape = (28, 28, 1)
n_label_vals = 10
im_dtype = 'float32'
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=2, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = MaxPooling2D((2, 2))(xx)
xx = RProjConv2D(offset_creator_class, vv, 32, kernel_size=2, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = MaxPooling2D((2, 2))(xx)
xx = Flatten()(xx)
xx = RProjDense(offset_creator_class, vv, 256, kernel_initializer='he_normal', activation='relu',
kernel_regularizer=l2(weight_decay))(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
def build_cnn_model_cifar_allcnn(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'):
im_shape = (32, 32, 3)
n_label_vals = 10
im_dtype = 'float32'
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=1, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=1, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=2, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=2, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal',
padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=1, strides=1, kernel_initializer='he_normal',
padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = RProjConv2D(offset_creator_class, vv, 10, kernel_size=1, strides=1, kernel_initializer='he_normal',
padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = GlobalAveragePooling2D()(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_regularizer=l2(weight_decay), activation='softmax')(xx)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
vv = None
def build_test():
im_shape = (28, 28, 1)
n_label_vals = 10
im_dtype = 'float32'
input_images = Input(shape=im_shape)
global vv
if vv is None:
vv = ThetaPrime(100)
xx = input_images
xx = Flatten()(xx)
for _ in range(3):
xx = Dense(100, activation='relu')(xx)
logits = Dense(100)(xx)
    logits = MultiplyLayer(vv.var_2d)(logits)  # ThetaPrime exposes var_2d (see rproj_layers_util)
logits = Dense(10)(logits)
model = Model(inputs=input_images, outputs=logits)
return model, vv
def build_LeNet_cifar(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'):
im_shape = (32, 32, 3)
n_label_vals = 10
im_dtype = 'float32'
assert proj_type in ('dense', 'sparse')
if proj_type == 'dense':
offset_creator_class = OffsetCreatorDenseProj
else:
# sparse
offset_creator_class = OffsetCreatorSparseProj
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
with tf.name_scope('net') as scope:
vv = ThetaPrime(vsize)
xx = RProjConv2D(offset_creator_class, vv, 6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = MaxPooling2D((2, 2))(xx)
xx = RProjConv2D(offset_creator_class, vv, 16, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = MaxPooling2D((2, 2))(xx)
xx = Flatten()(xx)
# xx = Dropout(0.5)(xx)
xx = RProjDense(offset_creator_class, vv, 120, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
# xx = Dropout(0.5)(xx)
xx = RProjDense(offset_creator_class, vv, 84, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
# xx = Dropout(0.5)(xx)
logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='glorot_uniform', activation='softmax', kernel_regularizer=l2(weight_decay))(xx)
model = ExtendedModel(input=input_images, output=logits)
model.add_extra_trainable_weight(vv.var_2d)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
def build_model_cifar_LeNet_fastfood(weight_decay=0, vsize=100, shift_in=None, DD=None, d_rate=0.0, c1=6, c2=16, d1=120, d2=84):
'''If DD is not specified, it will be computed.'''
im_shape = (32, 32, 3)
n_label_vals = 10
im_dtype = 'float32'
with tf.name_scope('inputs'):
input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in)
input_labels = Input(batch_shape=(None,), dtype='int64')
def define_model(input_images, DenseLayer, Conv2DLayer):
vv = ThetaPrime(vsize)
xx = Conv2DLayer(c1, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images)
xx = MaxPooling2D((2, 2))(xx)
xx = Conv2DLayer(c2, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = MaxPooling2D((2, 2))(xx)
xx = Flatten()(xx)
xx = Dropout(d_rate)(xx)
xx = DenseLayer(d1, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = Dropout(d_rate)(xx)
xx = DenseLayer(d2, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx)
xx = Dropout(d_rate)(xx)
logits = DenseLayer(10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(xx)
model = ExtendedModel(input=input_images, output=logits)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits']
for field in ['logits']:
model.add_var(field, locals()[field])
return model
if not DD:
with tf.name_scope('net_disposable'):
# Make disposable direct model
model_disposable = define_model(input_images, Dense, Conv2D)
DD = np.sum([np.prod(var.get_shape().as_list()) for var in model_disposable.trainable_weights]).item()
print(f"D {DD} {type(DD)}")
del model_disposable
with tf.name_scope('net'):
# Make real RProj FWH model
fwh_projector = FastWalshHadamardProjector(vsize, DD)
DenseLayer = lambda *args, **kwargs: RProjDense(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs)
Conv2DLayer = lambda *args, **kwargs: RProjConv2D(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs)
model = define_model(input_images, DenseLayer, Conv2DLayer)
fwh_projector.check_usage()
for ww in fwh_projector.trainable_weights:
model.add_extra_trainable_weight(ww)
for ww in fwh_projector.non_trainable_weights:
model.add_extra_non_trainable_weight(ww)
nontrackable_fields = ['input_images', 'preproc_images', 'input_labels']
for field in nontrackable_fields:
model.add_var(field, locals()[field])
make_and_add_losses(model, input_labels)
return model
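# A minimal sketch of the DD reuse pattern described in the docstring above
# (illustrative only). On the first construction the builder instantiates a
# disposable direct LeNet just to count its parameters and prints that value;
# later constructions can pass it back in to skip the disposable pass. With the
# default widths above (c1=6, c2=16, d1=120, d2=84) that count works out to 62006.
def _lenet_fastfood_example_sketch():
    '''Illustrative only; nothing in this module calls it.'''
    model_first = build_model_cifar_LeNet_fastfood(vsize=1000)            # prints D
    model_again = build_model_cifar_LeNet_fastfood(vsize=1000, DD=62006)  # reuses D
    return model_first, model_again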
| 18,121 | 44.762626 | 202 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/builder/test_model_builders.py
|
from unittest import TestCase
import tensorflow as tf
import numpy as np
from tf_data import Dataset
from tf_model import Model
from .model_builders import build_model_mnist_fc, build_cnn_model_mnist_bhagoji, build_test, build_cnn_model_mnist_dev_conv
from ..keras_ext.rproj_layers_util import ThetaPrime
import resource
class Test(TestCase):
def test_build_model_summary(self):
        model, _ = build_model_mnist_fc()  # the builder returns (model, theta)
print('All model weights:')
# total_params = summarize_weights(model.trainable_weights)
print('Model summary:')
model.summary()
model.print_trainable_warnings()
def test_build_model_run(self):
        model, _ = build_model_mnist_fc()
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(1)
output = model(x_train)
        accuracy = np.argmax(output, axis=1) == y_train
print(output, accuracy)
def test_build_model_get_weights(self):
        model, _ = build_model_mnist_fc()
weights = model.get_weights()
model.set_weights(weights)
# print(weights)
def test_build_model_trainable_variables(self):
        model, _ = build_model_mnist_fc()
vars = model.trainable_variables
print(vars)
def test_build_model_test_bp(self):
model, theta = build_model_mnist_fc()
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
# model = Model.create_model("dev")
# x = tf.Variable(3.0)
# y = x * x
for i in range(10):
with tf.GradientTape() as tape:
# tape.watch(theta)
predictions = model(x_train, training=True)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
# tape.watch(x)
# y = x * x
# grads = tape.gradient(y, [x])
print(loss_value)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
def test_build_model_test_conv(self):
model = build_cnn_model_mnist_dev_conv(proj_type='sparse', vsize=1000)
# model, theta = build_model_mnist_fc()
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(12800)
batch_size = 128
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
for i in range(10):
for bid in range(int(x_train.shape[0] / batch_size)):
batch_x = x_train[bid * batch_size:(bid + 1) * batch_size]
batch_y = y_train[bid * batch_size:(bid + 1) * batch_size]
with tf.GradientTape() as tape:
predictions = model(batch_x, training=True)
loss_value = loss_object(y_true=batch_y, y_pred=predictions)
print(loss_value)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print(using("Sparse"), flush=True)
def test_build_model_test_timing(self):
import time
start1 = time.time()
model = build_cnn_model_mnist_bhagoji()
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
for i in range(10):
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
print(loss_value)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
duration_sparse = time.time() - start1
start2 = time.time()
model = build_cnn_model_mnist_bhagoji(proj_type='dense')
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.00001)
for i in range(10):
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
print(loss_value)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
duration_dense = time.time() - start2
print(f"Done!")
print(f"Dense: {duration_dense}")
print(f"Sparse: {duration_sparse}")
def test_build_model_test_vars(self):
def run():
model, theta = build_test()
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
# model = Model.create_model("dev")
# x = tf.Variable(3.0)
# y = x * x
with tf.GradientTape() as tape:
# tape.watch(theta.var)
predictions = model(x_train, training=True)
# predictions = predictions * tf.norm(theta.var)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
            vars = model.trainable_variables + [theta.var_2d]
grads = tape.gradient(loss_value, vars)
optimizer.apply_gradients(zip(grads, vars))
run()
def test_build_model_write_graph(self):
# tf.compat.v1.disable_eager_execution()
tf.summary.trace_on()
        model, _ = build_model_mnist_fc(depth=1)
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(1)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
@tf.function
def run():
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss_value = loss_object(y_true=y_train, y_pred=predictions)
run()
writer = tf.summary.create_file_writer("graph_debug")
with writer.as_default():
tf.summary.trace_export("graph", step=1)
# grads = tape.gradient(tf.Variable(5), model.trainable_weights)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
def using(point=""):
usage = resource.getrusage(resource.RUSAGE_SELF)
return '''%s: usertime=%s systime=%s mem=%s mb
''' % (point, usage[0], usage[1],
(usage[2] * resource.getpagesize()) / 1000000.0)
| 7,121 | 33.572816 | 123 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/builder/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/test_layers.py
|
#! /usr/bin/env python
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys
import skimage
import skimage.io
import skimage.transform
import numpy as np
import tensorflow as tf
from keras.layers import Input
import keras.backend as K
from keras.models import Sequential, Model
#pack_root = os.path.join(os.path.dirname(__file__), '..', '..')
#sys.path.insert(1, pack_root)
# extended Keras layers
from keras_layers import *
def sample_box(proposed_box, target_box,high_thresh, low_thresh,batch_size):
""" Compute Box IOU and sample positive/negative boxes and targe boxes
Input:
- proposed_box: tensor, all of the proposed boxes from RPN model.
- target_box: tensor, groudtruth box from input dataset.
- high_thresh: float, iou threshold to pick positive samples.
- low_thresh: float, iou threshold to pick negative samples.
- batch_sizes: output sample size.
Output:
- packed_pos_samples: tensor, packed with pos_samples and neg_samples.
- negative_samples: tensor.
"""
    # NOTE: this function should move to model_builder.py later.
out_iou = BoxIoU()([proposed_box, target_box])
sample_idx = BoxSamplerPosNeg(high_thresh, low_thresh, batch_size)(out_iou)
## NOTE: pos_samples is packed with pos_samples and tar_samples. Do NOT unpack here,
## otherwise keras cannot recognize the tensor size.
#packed_pos_samples = BoxSamplerPositive(high_thresh, batch_size)(
# [proposed_box, target_box,out_iou])
#neg_samples = BoxSamplerNegative(low_thresh, batch_size)([proposed_box, out_iou])
model = Model(input=[proposed_box, target_box], output=[
sample_idx])
return model
def test_box_sampling():
print('Test box sampling module ...')
# build keras model graph
in_box1 = Input(batch_shape=(1,3, 4)) # proposed box
in_box2 = Input(batch_shape=(1,2, 4)) # target box
model = sample_box(in_box1, in_box2, 0.1, 0.1, 2)
# create testing input values
in_box1_val = np.array([[20., 10., 5., 5.],
[80., 10., 5., 20.],
[80., 80., 10., 5.]])
in_box1_val = np.tile(in_box1_val, (1,1,1))
in_box2_val = np.array([[20., 10., 20., 10.],
[80., 80., 10., 10.]])
in_box2_val = np.tile(in_box2_val, (1,1,1))
# run graph
init = tf.compat.v1.initialize_all_variables()
sess = tf.compat.v1.Session()
sess.run(init)
out_vals = sess.run(model.output, feed_dict={
model.input[0]: in_box1_val,
model.input[1]: in_box2_val})
print('box sampling OK!')
def test_boxiou():
print('Test Box IOU layer...')
# build keras model graph
in_box1 = Input(batch_shape=(1,3, 4)) # proposed box
in_box2 = Input(batch_shape=(1,2, 4)) # target box
out_iou = BoxIoU()([in_box1, in_box2])
model = Model(input=[in_box1, in_box2], output=out_iou)
# create testing input values
in_box1_val = np.array([[20., 10., 5., 5.],
[80., 10., 5., 20.],
[80., 80., 10., 5.]])
in_box1_val = np.tile(in_box1_val, (1,1,1))
in_box2_val = np.array([[20., 10., 20., 10.],
[80., 80., 10., 10.]])
in_box2_val = np.tile(in_box2_val, (1,1,1))
# run graph
init = tf.compat.v1.initialize_all_variables()
sess = tf.compat.v1.Session()
sess.run(init)
out_iou_val = sess.run(model.output, feed_dict={
model.input[0]: in_box1_val,
model.input[1]: in_box2_val})
print('Box IOU OK!')
print(out_iou_val)
def test_selectpos():
print('Test SelectPosMakeTheta layer...')
in_sample_index = Input(batch_shape=(5,3)) # sample index
in_box_coords = Input(batch_shape=(6,4))
out_theta = SelectPosMakeTheta(64,64)([in_sample_index, in_box_coords])
model = Model(input=[in_sample_index, in_box_coords], output = out_theta)
# create some data
sample_index = np.array([[1, 2, 1],
[1, 0, 3],
[1, 4, 2],
[-1,1, -1],
[-1,3, -1]])
box_coords = np.array([[0., 0., 12., 14.],
[1., 2., 15., 15.],
[1.5, 2., 4., 10.],
[5., 8., 4., 10.],
[5.5, 3., 6., 8.],
[3., 4., 9., 9.]])
# run graph
init = tf.compat.v1.initialize_all_variables()
sess = tf.compat.v1.Session()
sess.run(init)
out_theta_val = sess.run(model.output, feed_dict={
model.input[0]: sample_index,
model.input[1]: box_coords})
print('SelectPosMakeTheta works!')
print(out_theta_val)
# def test_tile():
# in_x = Input(batch_shape = (1,13,13,5))
# in_y = Input(batch_shape = (12,6))
#
# out_x = TileTensorLike()([in_x, in_y])
# model = Model(input=[in_x,in_y], output=out_x)
#
# in_x_val = np.random.rand(1,13,13,5)
# in_y_val = np.random.rand(12,6)
#
# # run graph
# init = tf.compat.v1.initialize_all_variables()
# sess = tf.compat.v1.Session()
# sess.run(init)
#
# out_x_val = model([in_x_val, in_y_val])
#
# print('Tile works!')
# print(out_x_val.shape)
def run(model, inputs):
return model(inputs)
if __name__ == '__main__':
test_boxiou()
#test_box_sampling()
# test_selectpos()
# test_tile()
| 6,558 | 34.074866 | 89 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/engine.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .engine_training import ExtendedModel, LazyModel
from .engine_topology import LazyContainer
| 1,201 | 51.26087 | 80 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/rproj_layers_util.py
|
#! /usr/bin/env python
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
# from keras.backend.tensorflow_backend import _convert_string_dtype
from tensorflow.keras import regularizers, constraints, initializers, activations
from sklearn.random_projection import SparseRandomProjection as SRP
from scipy.sparse import find
import time
import os
import sys
lab_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.insert(1, lab_root)
# from ops.fwh import fast_walsh_hadamard as c_fast_walsh_hadamard
###########
#
# A quick fix for the following error
# from keras.backend.tensorflow_backend import _convert_string_dtype
# Keras 2.0.8 NameError: global name '_convert_string_dtype' is not defined
# Also called in rproj_layers.py
def _convert_string_dtype(dtype):
if dtype == 'float16':
return np.float16
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
elif dtype == 'int16':
return np.int16
elif dtype == 'int32':
return np.int32
elif dtype == 'int64':
return np.int64
elif dtype == 'uint8':
        return np.uint8
elif dtype == 'uint16':
return np.uint16
else:
raise ValueError('Unsupported dtype:', dtype)
###########
class ThetaPrime(object):
def __init__(self, size):
# self.var = tf.Variable(np.random.randn(size).astype('float32'), trainable=True, name="ThetaPrime")
# self.var = tf.Variable(np.zeros((size), dtype='float32'), trainable=True, name="ThetaPrime")
self.var_2d = tf.Variable(np.zeros((1, size), dtype='float32'), trainable=True, name="ThetaPrime")
self.size = size
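# Illustrative sketch (not part of the original code): ThetaPrime owns the single
# trainable d-dimensional vector that the offset creators below project into the full
# parameter space. The helper name _demo_theta_prime is hypothetical.
def _demo_theta_prime():
    basis = ThetaPrime(128)                 # d = 128 intrinsic dimensions
    assert basis.var_2d.shape == (1, 128)   # stored as a row vector for matmul
    assert basis.var_2d.trainable           # the only trainable variable per network
    return basis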
# class ThetaPrimeLayer(Layer):
# def __init__(self, size):
# super(ThetaPrimeLayer, self).__init__()
# self.size = size
# self.var = None
# self.var_2d = None
#
# def build(self, input_shape):
# self.var = tf.Variable(np.random.randn(self.size).astype('float32'), trainable=True, name="ThetaPrime")
# # self.var = tf.Variable(np.zeros((size), dtype='float32'), trainable=True, name="ThetaPrime")
# self.var_2d = tf.expand_dims(self.var, 0)
class MultiplyLayer(Layer):
def __init__(self, var):
super(MultiplyLayer, self).__init__()
self.var = var
def call(self, inputs, **kwargs):
return inputs * self.var
###########
#
# OffsetCreator{Dense,Sparse,Fastfood}Proj
#
# These classes create offsets. Each layer is given a projector on
# construction and uses it as needed to create weight/bias/etc
# offsets.
#
###########
class OffsetCreateDenseProjExec():
def __init__(self, weight_basis, ww, shape, name):
self.weight_basis = weight_basis
self.ww = ww
self.shape = shape
self.name = name
def __call__(self, *args, **kwargs):
return tf.reshape(tf.matmul(self.weight_basis.var_2d, self.ww, name=self.name), self.shape)
class OffsetCreatorDenseProj(object):
def __init__(self):
self.basis_matrices = []
def create_theta_offset(self, weight_basis, shape, dtype, name=None):
assert isinstance(weight_basis, ThetaPrime), 'weight_basis should be a ThetaPrime'
if isinstance(shape, tf.TensorShape):
shape = shape.as_list()
# Create projection matrix ww
total_dim = 1
for dim in shape:
assert dim is not None and dim > 0, 'dimensions must be known'
total_dim *= dim
seed = np.random.randint(10e8)
ww_shape = (weight_basis.size, total_dim)
ww_0 = np.random.normal(0.0, 1.0, size=ww_shape)
ww = tf.Variable(ww_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_ww' % name)
return OffsetCreateDenseProjExec(weight_basis, ww, shape, name)
# theta_offset = tf.reshape(tf.matmul(weight_basis.var_2d, ww, name="MatMully"), shape)
#
# self.basis_matrices.append(ww)
#
# return theta_offset, [ww]
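# Illustrative sketch (hypothetical helper, added for clarity): the dense offset creator
# materialises a (d x prod(shape)) random matrix ww and returns a callable that maps the
# current theta-prime vector to a full-size weight offset.
def _demo_dense_offset():
    basis = ThetaPrime(8)                                   # d = 8
    creator = OffsetCreatorDenseProj()
    offset_exec = creator.create_theta_offset(basis, (3, 4), dtype='float32', name='demo')
    offset = offset_exec()          # shape (3, 4); all zeros while theta-prime is zero
    assert offset.shape == (3, 4)
    return offset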
class OffsetCreateSparseProjExec():
def __init__(self, weight_basis, normalizer, ww, shape, name):
self.weight_basis = weight_basis
self.normalizer = normalizer
self.ww = ww
self.shape = shape
self.name = name
def __call__(self, *args, **kwargs):
# Pre-multiply the normalizer by the low-rank parameter vector to avoid a sparse matrix - sparse matrix product,
# which is not well-supported in Tensorflow (instead of theta_full = (P*N^-1)*theta_small where P*N^-1 is a row-normalized
# projection matrix, do P*(N^-1*theta_small)). (N^-1*theta_small) can be written as simply an element-wise vector division.
theta_small_norm = tf.divide(self.weight_basis.var_2d, self.normalizer)
# theta_small_norm = self.weight_basis.var_2d
# Compute delta from theta_0 using sparse projection
# Note: sparse matrix must be first argument
delta_theta_flat = tf.sparse.sparse_dense_matmul(self.ww, theta_small_norm, adjoint_a=True, adjoint_b=True)
# Create theta
theta_offset = tf.reshape(delta_theta_flat, self.shape)
return theta_offset
class OffsetCreatorSparseProj(object):
def __init__(self):
self.basis_matrices = []
self.basis_matrix_normalizers = []
def create_theta_offset(self, weight_basis, shape, dtype, name=None):
assert isinstance(weight_basis, ThetaPrime), 'weight_basis should be a ThetaPrime'
if isinstance(shape, tf.TensorShape):
shape = shape.as_list()
# Create projection matrix ww
total_dim = 1
for dim in shape:
assert dim is not None and dim > 0, 'dimensions must be known'
total_dim *= dim
# Generate location and relative scale of non zero elements
M = SRP(weight_basis.size)._make_random_matrix(weight_basis.size, total_dim)
fm = find(M)
# Create sparse projection matrix from small vv to full theta space
ww0 = tf.SparseTensor(indices=np.array([fm[0], fm[1]]).T, values=fm[2],
dense_shape=[weight_basis.size, total_dim])
ww = tf.cast(ww0, _convert_string_dtype(dtype), name="SparseyCast")
# Create diagonal normalization matrix that will be filled in when all layers are created, so that we can normalize each
# row of the projection matrix (with length equal to the total number of parameters in the model) once we have all its elements.
# This will hold the norms of the rows of the un-normalized projection matrix.
norm = tf.sqrt(tf.sparse.reduce_sum(tf.square(ww)))
# tf.sqrt(tf.add_n([tf.sparse_reduce_sum(tf.square(bm), 1) for bm in basis_matrices]))
normalizer = tf.Variable(tf.tile([norm], [weight_basis.size]),
trainable=False, name='%s_normalizer' % name)
self.basis_matrices.append(ww)
self.basis_matrix_normalizers.append(normalizer)
return OffsetCreateSparseProjExec(weight_basis, normalizer, ww, shape, name)
# # Pre-multiply the normalizer by the low-rank parameter vector to avoid a sparse matrix - sparse matrix product,
# # which is not well-supported in Tensorflow (instead of theta_full = (P*N^-1)*theta_small where P*N^-1 is a row-normalized
# # projection matrix, do P*(N^-1*theta_small)). (N^-1*theta_small) can be written as simply an element-wise vector division.
# theta_small_norm = tf.divide(weight_basis.var_2d, normalizer)
#
# # Compute delta from theta_0 using sparse projection
# # Note: sparse matrix must be first argument
# delta_theta_flat = tf.sparse.sparse_dense_matmul(ww, theta_small_norm, adjoint_a=True, adjoint_b=True)
#
# # Create theta
# theta_offset = tf.reshape(delta_theta_flat, shape)
#
# # ww0 = tf.sparse.to_dense(ww0, validate_indices=False, name="SparseyDense")
# # # ww0 = tf.Variable(ww0, trainable=False)
#
# self.basis_matrices.append(ww)
# self.basis_matrix_normalizers.append(normalizer)
#
# # Note: previous versions added only ww0 to _non_trainable_weights but skipped normalizer. Here we more correctly return both.
# # return theta_offset, [ww0]
# return theta_offset, [ww0, normalizer]
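# Illustrative sketch (hypothetical helper, mirrors the dense demo above): the sparse
# creator builds a SparseTensor projection plus a row normalizer; the returned callable
# divides theta-prime by the normalizer before the sparse matmul.
def _demo_sparse_offset():
    basis = ThetaPrime(8)
    creator = OffsetCreatorSparseProj()
    offset_exec = creator.create_theta_offset(basis, (3, 4), dtype='float32', name='demo_sparse')
    offset = offset_exec()                    # shape (3, 4); zero while theta-prime is zero
    assert offset.shape == (3, 4)
    assert len(creator.basis_matrices) == 1   # one sparse projection per created offset
    return offset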
class OffsetCreateFastfoodProjExec():
def __init__(self, weight_basis, shape):
self.weight_basis = weight_basis
self.ww = []
self.shape = shape
def __call__(self, *args, **kwargs):
proj_tensor = self.weight_basis.get_projected_tensor(self.shape)
return proj_tensor
class OffsetCreatorFastfoodProj(object):
def __init__(self):
pass
def create_theta_offset(self, weight_basis, shape, dtype, name=None):
# Get offset from theta_0 (offset is initially 0)
assert isinstance(weight_basis,
FastWalshHadamardProjector), 'weight_basis should be a FastWalshHadamardProjector instance'
return OffsetCreateFastfoodProjExec(weight_basis, shape)
###########
#
# FastWalshHadamardProjector
#
# This class is instantiated once per network and manages the whole
# projection from d to D.
#
###########
class FastWalshHadamardProjector(Layer):
'''FastWalshHadamardProjector owns the d trainable parameters and
generates the D projected parameters.
FastWalshHadamardProjector must be instantiated before the model
is built with d (known) and D (possibly hard to find before model
is built). Thus some trickiness is necessary.
'''
def __init__(self, dd, DD, **kwargs):
super(FastWalshHadamardProjector, self).__init__(**kwargs)
self.dd = dd
self.DD = DD
self.index = 0
self.d_vec = self.add_weight('d_vec', (self.dd,), initializer='zeros')
self.project_vars, self.D_vec_exec = tf_fastfood_transform(self.d_vec, self.dd, self.DD)
for vv in self.project_vars:
self._non_trainable_weights.append(vv)
def get_projected_tensor(self, shape):
if isinstance(shape, tf.TensorShape):
shape = shape.as_list()
total_size = np.prod(shape)
assert self.index + total_size <= self.DD, 'Overrun D vector; requested too many projected tensors'
# ret = self.D_vec[self.index:self.index + total_size]
retflat = tf.slice(self.D_vec_exec(), [self.index], [total_size])
# print 'D_vec is', self.D_vec, 'and ret is', retflat
ret = tf.reshape(retflat, shape)
# print ' ... now ret is', ret
# print 'Sliced from %d to %d and reshaped to %s' % (self.index, total_size, repr(shape))
self.index += total_size
return ret
def check_usage(self):
if self.index == self.DD:
print('FastWalshHadamardProjector usage is perfect: %d out of %d dimensions used' % (self.index, self.DD))
else:
raise Exception(
'FastWalshHadamardProjector usage is off: %d out of %d dimensions used' % (self.index, self.DD))
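# Illustrative sketch (hypothetical helper): one FastWalshHadamardProjector is shared by
# the whole network; each layer carves consecutive slices out of the projected D-vector,
# and check_usage() verifies that exactly D entries were handed out.
def _demo_fwh_projector():
    proj = FastWalshHadamardProjector(8, 20)     # d = 8 intrinsic dims, D = 20 total params
    kernel = proj.get_projected_tensor((4, 4))   # consumes 16 of the 20 dimensions
    bias = proj.get_projected_tensor((4,))       # consumes the remaining 4
    proj.check_usage()                           # raises if usage != D
    return kernel, bias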
###########
#
# Fast Walsh Hadamard functions
#
###########
def np_fast_walsh_hadamard(x, axis, normalize=True):
'''Compute Fast Walsh-Hadamard transform in numpy.
Args:
x: tensor of shape (a0, a1, ... aN, L, b0, b1, ..., bN).
L must be a power of two.
axis: the "L" axis above, aka the axis over which to do the
Hadamard transform. All other dimensions are left alone;
         data on those dimensions do not interact.
normalize: Whether to normalize the results such that applying
the transform twice returns to the original input
value. If True, return values are floats even if input was
int.
Returns:
ret: transformed tensor with same shape as x
Tests:
Wikipedia case
>>> x = np.array([1,0,1,0,0,1,1,0])
>>> np_fast_walsh_hadamard(x, 0, False)
array([ 4, 2, 0, -2, 0, 2, 0, 2])
>>> np_fast_walsh_hadamard(np_fast_walsh_hadamard(x, 0), 0)
array([ 1., 0., 1., 0., 0., 1., 1., 0.])
'''
orig_shape = x.shape
assert axis >= 0 and axis < len(orig_shape), (
'For a vector of shape %s, axis must be in [0, %d] but it is %d'
% (orig_shape, len(orig_shape) - 1, axis))
h_dim = orig_shape[axis]
h_dim_exp = int(round(np.log(h_dim) / np.log(2)))
assert h_dim == 2 ** h_dim_exp, (
'hadamard can only be computed over axis with size that is a power of two, but'
' chosen axis %d has size %d' % (axis, h_dim))
working_shape_pre = [int(np.prod(orig_shape[:axis]))] # prod of empty array is 1 :)
working_shape_post = [int(np.prod(orig_shape[axis + 1:]))] # prod of empty array is 1 :)
working_shape_mid = [2] * h_dim_exp
working_shape = working_shape_pre + working_shape_mid + working_shape_post
# print 'working_shape is', working_shape
ret = x.reshape(working_shape)
for ii in range(h_dim_exp):
dim = ii + 1
arrs = np.split(ret, 2, axis=dim)
assert len(arrs) == 2
ret = np.concatenate((arrs[0] + arrs[1], arrs[0] - arrs[1]), axis=dim)
if normalize:
ret = ret / np.sqrt(float(h_dim))
ret = ret.reshape(orig_shape)
return ret
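# Illustrative check (added for clarity): the butterfly recursion above is equivalent to
# multiplying by the 2^k x 2^k Sylvester-Hadamard matrix H_k = [[H_{k-1}, H_{k-1}],
# [H_{k-1}, -H_{k-1}]]. The helper name is hypothetical.
def _demo_np_fwh_matches_matrix():
    rng = np.random.RandomState(0)
    x = rng.randn(8)
    H = np.array([[1.0]])
    for _ in range(3):                          # build the 8 x 8 Hadamard matrix
        H = np.block([[H, H], [H, -H]])
    expected = H.dot(x) / np.sqrt(8.0)          # normalized transform
    assert np.allclose(np_fast_walsh_hadamard(x, 0), expected)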
def _fast_walsh_hadamard_one_step(xx, axis):
aa, bb = tf.split(xx, 2, axis=axis)
ret = tf.concat((aa + bb, aa - bb), axis=axis)
return ret
def _fast_walsh_hadamard_one_step_method2(xx, pre, d1, d2, d3, post):
working_shape = tf.concat((pre, d1, d2, d3, post), axis=0)
xx = tf.reshape(xx, working_shape)
aa, bb = tf.split(xx, 2, axis=2)
ret = tf.concat((aa + bb, aa - bb), axis=2)
return ret
def tf_fast_walsh_hadamard(in_x, axis, normalize=True, method='two'):
'''Compute Fast Walsh-Hadamard transform in tensorflow.
Args:
x: tensor of shape (a0, a1, ... aN, L, b0, b1, ..., bN).
L must be a power of two.
axis: the "L" axis above, aka the axis over which to do the
Hadamard transform. All other dimensions are left alone;
         data on those dimensions do not interact.
normalize: Whether to normalize the results such that applying
the transform twice returns to the original input
value.
method:
'one': Original reshape to [2]*ll version
'two': Deal with TF "UnimplementedError: SliceOp : Unhandled input dimensions" error...
'c': Use C++ FWH Op.
Returns:
ret: transformed tensor with same shape as x. Returned
tensor is always float even if input was int.
Tests:
    >>> in_x = tf.compat.v1.placeholder('float32')
>>> in_x
<tf.Tensor 'Placeholder:0' shape=<unknown> dtype=float32>
    >>> sess = tf.compat.v1.InteractiveSession()
Wikipedia case:
>>> x = np.array([1,0,1,0,0,1,1,0])
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False), feed_dict={in_x: x})
array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32)
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False, method='two'), feed_dict={in_x: x})
array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32)
>>> sess.run(tf_fast_walsh_hadamard(tf_fast_walsh_hadamard(in_x, 0), 0), feed_dict={in_x: x})
array([ 1., 0., 1., 0., 0., 1., 1., 0.], dtype=float32)
Verify equivalence with numpy approach:
>>> np.random.seed(123)
>>> x = np.random.uniform(0, 1, (3, 64, 5))
>>> h_np = np_fast_walsh_hadamard(x, 1)
>>> h_tf_ = tf_fast_walsh_hadamard(in_x, 1)
>>> h_tf2_ = tf_fast_walsh_hadamard(in_x, 1, method='two')
>>> h_tf = sess.run(h_tf_, feed_dict={in_x: x})
>>> h_tf2 = sess.run(h_tf2_, feed_dict={in_x: x})
>>> x.shape
(3, 64, 5)
>>> h_np.shape
(3, 64, 5)
>>> h_tf.shape
(3, 64, 5)
>>> h_tf2.shape
(3, 64, 5)
>>> abs(h_np - h_tf).max() < 1e-6
True
>>> abs(h_np - h_tf2).max() < 1e-6
True
Try a few other shapes / axes
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0]}).shape == x[0].shape
True
>>> sess.run(tf_fast_walsh_hadamard(in_x, 1), feed_dict={in_x: x[:, :, 0]}).shape == x[:, :, 0].shape
True
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0, :, 0]}).shape == x[0, :, 0].shape
True
'''
orig_shape = tf.shape(input=in_x)
h_dim = orig_shape[axis]
h_dim_exp = tf.cast(tf.round(tf.math.log(tf.cast(h_dim, dtype=tf.float32)) / np.log(2)), 'int32')
assert_pow2 = tf.compat.v1.assert_equal(h_dim, tf.pow(2, h_dim_exp),
message='hadamard can only be computed over axis with size that is a power of two')
with tf.control_dependencies([assert_pow2]):
working_shape_pre = tf.expand_dims(tf.reduce_prod(input_tensor=orig_shape[:axis]),
axis=0) # reduce_prod of empty array is 1
working_shape_post = tf.expand_dims(tf.reduce_prod(input_tensor=orig_shape[axis + 1:]),
axis=0) # reduce_prod of empty array is 1
ii = tf.constant(0)
assert method in ('one', 'two', 'c')
if method == 'one':
# expand to working dims [pre, 2, 2, 2, ..., 2, 2, post]
working_shape_mid = tf.tile([2], [h_dim_exp])
working_shape = tf.concat((working_shape_pre, working_shape_mid, working_shape_post),
axis=0)
ret_0 = tf.reshape(in_x, working_shape)
cond = lambda i, x: tf.less(i, h_dim_exp)
body = lambda i, x: (tf.add(i, 1), _fast_walsh_hadamard_one_step(x, i + 1))
ii_final, ret = tf.while_loop(
cond=cond,
body=body,
loop_vars=[ii, ret_0],
parallel_iterations=1 # check on this?
)
elif method == 'two':
# Never expand to high rank. Roll dimensions instead. This is
# needed because backprop through the slice operator only
# supports up to rank 7 tensors in TF 1.3
# [pre, 1, 2, h_dim/2, post] ->
# [pre, 2, 2, h_dim/4, post] -> ...
# [pre, h_dim/2, 2, 1, post]
d1 = tf.expand_dims(tf.constant(1), axis=0)
d2 = tf.expand_dims(tf.constant(2), axis=0) # always 2
d3 = tf.expand_dims(tf.math.floordiv(h_dim, 2), axis=0)
working_shape_0 = tf.concat((working_shape_pre, d1, d2, d3, working_shape_post), axis=0)
ret_0 = tf.reshape(in_x, working_shape_0)
cond = lambda i, d1, d3, x: tf.less(i, h_dim_exp)
body = lambda i, d1, d3, x: (tf.add(i, 1),
d1 * 2,
tf.math.floordiv(d3, 2),
_fast_walsh_hadamard_one_step_method2(x, working_shape_pre, d1, d2, d3,
working_shape_post))
ii_final, d1_final, d3_final, ret = tf.while_loop(
cond=cond,
body=body,
loop_vars=[ii, d1, d3, ret_0],
parallel_iterations=1 # check on this?
)
else:
# 'c' version
# Only works for rank-1 (vector) input
assert False, 'c version disabled for now'
assert axis == 0, 'axis must be 0 for the c version of tf_fast_walsh_hadamard'
assert normalize, 'for c version normalize must be True'
assert_rank1 = tf.compat.v1.assert_rank(in_x, 1)
with tf.control_dependencies([assert_rank1, assert_pow2]):
ret = c_fast_walsh_hadamard(in_x)
if normalize and method != 'c':
ret = ret / tf.sqrt(tf.cast(h_dim, dtype=tf.float32))
ret = tf.reshape(ret, orig_shape)
return ret
def tf_fastfood_transform(in_x, dd, DD, use_get=False, use_C=False):
'''Transform from d to D. Pads as necessary.
For now: assume dd and DD are known in python.'''
# Tensor d and D
# assert_D_big = tf.assert_greater_equal(DD, dd, message='d cannot be larger than D')
# with tf.control_dependencies([assert_D_big]):
# ll = tf.cast(tf.round(tf.log(tf.to_float(DD)) / np.log(2)), 'int32')
# LL = tf.pow(2, ll)
# Python d and D
assert isinstance(dd, int), 'd should be int'
assert isinstance(DD, int), 'D should be int'
assert DD >= dd, 'd cannot be larger than D'
assert dd > 0, 'd and D must be positive'
ll = int(np.ceil(np.log(DD) / np.log(2)))
LL = 2 ** ll
# Make vars
init_BB = tf.cast(tf.random.uniform((LL,), 0, 2, dtype='int32'), dtype=tf.float32) * 2 - 1
init_Pi = tf.random.shuffle(tf.range(LL))
init_GG = tf.random.normal((LL,))
init_divisor = lambda GG: tf.sqrt(LL * tf.reduce_sum(input_tensor=tf.pow(GG.initialized_value(), 2)))
if use_get:
BB = tf.compat.v1.get_variable('B', initializer=init_BB, trainable=False)
Pi = tf.compat.v1.get_variable('Pi', initializer=init_Pi, trainable=False)
GG = tf.compat.v1.get_variable('G', initializer=init_GG, trainable=False)
divisor = tf.compat.v1.get_variable('divisor', initializer=init_divisor(GG), trainable=False)
else:
BB = tf.Variable(init_BB, name='B', trainable=False)
Pi = tf.Variable(init_Pi, name='Pi', trainable=False)
GG = tf.Variable(init_GG, name='G', trainable=False)
divisor = tf.Variable(init_divisor(GG), name='divisor', trainable=False)
fastfood_vars = [BB, Pi, GG, divisor]
ret = FastfoodExec(in_x, BB, Pi, GG, LL, ll, DD, dd, divisor, use_C)
return fastfood_vars, ret
class FastfoodExec():
def __init__(self, in_x, BB, Pi, GG, LL, ll, DD, dd, divisor, use_C):
self.in_x = in_x
self.BB = BB
self.Pi = Pi
self.GG = GG
self.LL = LL
self.ll = ll
self.DD = DD
self.dd = dd
self.divisor = divisor
self.use_C = use_C
def __call__(self, *args, **kwargs):
# Implement transform
dd_pad = tf.pad(tensor=self.in_x, paddings=[[0, self.LL - self.dd]])
mul_1 = tf.multiply(self.BB, dd_pad)
if self.use_C:
mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='c', normalize=True)
else:
mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='two', normalize=False)
mul_3 = tf.gather(mul_2, self.Pi)
mul_4 = tf.multiply(mul_3, self.GG)
if self.use_C:
mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='c', normalize=True)
print('\nWARNING: check normalization on this next line more carefully\n')
ret = tf.divide(tf.slice(mul_5, [0], [self.DD]), self.divisor * np.sqrt(float(self.DD) / self.LL / self.ll))
else:
mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='two', normalize=False)
ret = tf.divide(tf.slice(mul_5, [0], [self.DD]), self.divisor * np.sqrt(float(self.DD) / self.LL))
return ret
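# Illustrative sketch (hypothetical helper): tf_fastfood_transform returns the fixed
# random Fastfood variables plus a callable that maps a d-dimensional vector to its
# D-dimensional projection.
def _demo_fastfood_transform():
    d_vec = tf.Variable(np.random.randn(10).astype('float32'), trainable=True)
    fastfood_vars, project = tf_fastfood_transform(d_vec, 10, 100)
    projected = project()                       # shape (100,)
    assert projected.shape == (100,)
    return projected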
def test_timing():
N = 29
in_x = tf.compat.v1.placeholder('float32')
sum_x = tf.reduce_sum(input_tensor=in_x)
hh = tf_fast_walsh_hadamard(in_x, 1, True)
sum_h = tf.reduce_sum(input_tensor=hh)
sess = tf.compat.v1.InteractiveSession()
for ll in range(1, N):
L = 2 ** ll
print('\n%d, H dim %d' % (ll, L))
x = np.random.uniform(0, 1, (1, L, 1))
if L < 33554432:
start = time.time()
np_fast_walsh_hadamard(x, 1)
end = time.time()
print(' np %14s elems: %16s' % ('%d' % L, '%f' % (end - start)))
else:
print(' np <skipped>')
start = time.time()
sess.run(sum_h, feed_dict={in_x: x})
end = time.time()
print(' tf %14s elems: %16s' % ('%d' % L, '%f' % (end - start)))
# Time each op the third time (ignore CUDA tuning time) then subtract data transfer time
sess.run(sum_x, feed_dict={in_x: x})
sess.run(sum_x, feed_dict={in_x: x})
start = time.time()
sess.run(sum_x, feed_dict={in_x: x})
elap_data = time.time() - start
sess.run(sum_h, feed_dict={in_x: x})
sess.run(sum_h, feed_dict={in_x: x})
start = time.time()
sess.run(sum_h, feed_dict={in_x: x})
elap_had = time.time() - start
print(' tf just H %14s elems: %16s' % ('%d' % (L), '%f' % (elap_had - elap_data)))
DD = max(5, int(np.ceil(L * .8)))
dd = max(3, int(np.ceil(DD * .001)))
if x.shape[1] >= dd:
for use_C in [False, True]:
st = '(C) ' if use_C else '(TF)'
ffvars, xform = tf_fastfood_transform(in_x, dd, DD, use_C=use_C)
sum_xf = tf.reduce_sum(input_tensor=xform)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
end = time.time()
print(' tf %s fastf %14s elems: %16s' % (st, '%d' % L, '%f' % (end - start)))
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
elap_data = time.time() - start
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
elap_had = time.time() - start
print(' tf %s just fastf%14s elems: %16s' % (st, '%d' % (L), '%f' % (elap_had - elap_data)))
else:
print(' tf fastfood %14s elems: <skipped, too small>' % ('%d' % L))
if L > 32768:
print(' <skipped large batch cases>')
continue
x2 = np.random.uniform(0, 1, (10, L, 100))
start = time.time()
np_fast_walsh_hadamard(x2, 1)
end = time.time()
print(' np %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (end - start)))
start = time.time()
sess.run(sum_h, feed_dict={in_x: x2})
end = time.time()
print(' tf %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (end - start)))
# Time each op the third time (ignore CUDA tuning time) then subtract data transfer time
sess.run(sum_x, feed_dict={in_x: x2})
sess.run(sum_x, feed_dict={in_x: x2})
start = time.time()
sess.run(sum_x, feed_dict={in_x: x2})
elap_data = time.time() - start
sess.run(sum_h, feed_dict={in_x: x2})
sess.run(sum_h, feed_dict={in_x: x2})
start = time.time()
sess.run(sum_h, feed_dict={in_x: x2})
elap_had = time.time() - start
print(' tf just H %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (elap_had - elap_data)))
print('The next dim, 2**29 ==', 2 ** 29, 'crashes with OOM on a TitanX')
if __name__ == '__main__':
import doctest
doctest.testmod()
test_timing()
| 28,439 | 36.970628 | 136 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/engine_training.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras import Input
from tensorflow.python.keras import backend
from src.subspace.general.util import DotDict
from .util import full_static_shape
class ExtendedModel(Model):
'''Slight extensions of the Keras model class.'''
def __init__(self, input, output, name=None):
super(ExtendedModel, self).__init__(input, output, name=name)
self.v = DotDict()
#self._vars = OrderedDict()
self._trackable = set()
self._extra_trainable_weights = []
self._extra_non_trainable_weights = []
def add_loss_reg(self):
'''Adds losses for all attached regularizers'''
# New Keras interface for regularization / etc layer losses
losses = []
for loss in self.losses:
if loss is None or loss == 0 or loss == 0.0:
continue
losses.append(loss)
if len(losses) > 0:
print('Regularizer and other internal losses from model: %d losses' % len(losses))
for loss in losses:
print(' loss var=%s' % loss)
self.add_trackable('loss_reg', tf.add_n(losses, name='loss_reg'))
if 'loss_reg' not in self.v:
print('Regularizer and other internal losses from model: none to add.')
def add_var(self, name_or_var, var=None, trackable=False):
'''Call like self.add_var('name', var) or self.add_var(var) to use var.name as name.'''
if var is None:
var = name_or_var
name = var.name
else:
name = name_or_var
self.v[name] = var
if trackable:
self._trackable.add(name)
elif name in self._trackable:
self._trackable.remove(name)
def add_vars(self, names_or_vars, varss=None, trackable=False):
'''Call with:
- one list of vars
- equal length lists of names and vars
- dict of name: var pairs
'''
if isinstance(names_or_vars, dict):
for name,var in names_or_vars.items():
self.add_var(name, var, trackable=trackable)
elif varss is None:
for var in names_or_vars:
self.add_var(var, var=None, trackable=trackable)
else:
assert len(names_or_vars) == len(varss), 'should be two lists of equal length'
for name,var in zip(names_or_vars, varss):
self.add_var(name, var, trackable=trackable)
def add_trackable(self, name_or_var, var=None):
self.add_var(name_or_var, var=var, trackable=True)
def add_trackables(self, names_or_vars, varss=None):
self.add_vars(names_or_vars, varss=varss, trackable=True)
def del_var(self, name):
'''Remove var if it exists'''
if name in self.v:
del self.v[name]
if name in self._trackable:
self._trackable.remove(name)
@property
def var_names(self):
return list(self.v.keys())
@property
def trackable_names(self):
return [k for k in self.var_names if k in self._trackable]
@property
def vars(self):
return self.get_vars()
def get_vars(self, var_names=None):
if var_names is None:
var_names = self.var_names
return [self.v[name] for name in var_names]
@property
def tensors(self):
return self.get_tensors()
def get_tensors(self, tensor_names=None):
return [vv for vv in self.get_vars(var_names=tensor_names) if isinstance(vv, tf.Tensor)]
def get_weights(self):
return backend.batch_get_value(self.extra_trainable_weights)
def set_weights(self, weights):
# super(Model, self).set_weights(weights)
assert len(weights) == 1, f"Can only have single weight for thetaprime! {weights}"
thetaPrime = self.extra_trainable_weights[0]
tuple = [(thetaPrime, weights[0])]
backend.batch_set_value(tuple)
def set_all_weights(self, weights):
super(Model, self).set_weights(weights)
@property
def trackable_vars(self):
return [self.v[k] for k in self.var_names if k in self._trackable]
@property
def trackable_dict(self):
return self.get_tensor_dict(self.trackable_names)
@property
def update_dict(self):
return {'update__%d' % ii: update for ii, update in enumerate(self.updates)}
@property
def trackable_and_update_dict(self):
'''Returns a dict of all trackables and updates. Useful for
training when you want to fetch all trackables and also ensure
any updates (e.g. for rolling average BatchNormalization
layers) are fetched.
'''
ret = self.trackable_dict
ret.update(self.update_dict)
return ret
def get_tensor_dict(self, tensor_names=None):
if tensor_names is None:
tensor_names = self.var_names
filtered_names = [nn for nn in tensor_names if isinstance(self.v[nn], tf.Tensor)]
return {kk:self.v[kk] for kk in filtered_names}
def print_trainable_warnings(self, graph=None):
'''Print warnings for any vars marked as trainable in the
model but not graph, and vice versa. A common case where this
occurs is in BatchNormalization layers, where internal
variables are updated but not marked as trainable.
'''
if graph is None:
try:
graph = tf.python.get_default_graph()
except AttributeError:
graph = tf.compat.v1.get_default_graph()
def tag(name):
if 'batchnormalization' in name and 'running' in name:
# Keras 1.2.2
return ' . '
elif 'batch_normalization' in name and 'moving' in name:
# Keras 2+
return ' . '
else:
return '***'
# Check which vars are trainable
trainable_vars_from_graph = graph.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
trainable_vars_from_model = self.trainable_weights
in_graph_not_model = set(trainable_vars_from_graph).difference(set(trainable_vars_from_model))
if in_graph_not_model:
print('Warning: the following vars are marked as trainable in the graph but not in model.trainable_weights (typical for BatchNormalization layers. "." if expected, "***" if not):')
print('\n'.join([' %4s %s: %s' % (tag(vv.name), vv.name, vv) for vv in in_graph_not_model]))
in_model_not_graph = set(trainable_vars_from_model).difference(set(trainable_vars_from_graph))
if in_model_not_graph:
print('Warning: the following vars are in model.trainable_weights but not marked as trainable in the graph:')
print('\n'.join([' %4s %s: %s' % (tag(vv.name), vv.name, vv) for vv in in_model_not_graph]))
def add_extra_trainable_weight(self, weight):
self._extra_trainable_weights.append(weight)
@property
def extra_trainable_weights(self):
return self._extra_trainable_weights
@property
def trainable_weights(self):
tw = super(ExtendedModel, self).trainable_weights
# tw.extend(self.extra_trainable_weights)
return tw
def add_extra_non_trainable_weight(self, weight):
self._extra_non_trainable_weights.append(weight)
@property
def extra_non_trainable_weights(self):
return self._extra_non_trainable_weights
@property
def non_trainable_weights(self):
ntw = super(ExtendedModel, self).non_trainable_weights
ntw.extend(self.extra_non_trainable_weights)
return ntw
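# Illustrative sketch (hypothetical helper, not used by the training code): ExtendedModel
# behaves like a functional Model but lets callers register named tensors as "trackables"
# that training loops can fetch alongside the usual outputs.
def _demo_extended_model():
    inp = Input(shape=(4,))
    out = tf.keras.layers.Dense(2)(inp)
    model = ExtendedModel(input=inp, output=out, name='demo_extended')
    model.add_trackable('logits', model.output)   # registered under a name for later fetching
    assert model.trackable_names == ['logits']
    return model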
class LazyModel(ExtendedModel):
'''Like ExtendedModel. But lazy and nestable.
In general, we would like to be able to encapsulate functionality
in larger containers than single layers. However, this is
difficult because when using the standard Model (and
ExtendedModel), you must know the input shape in order to make a
placeholder Input layer. This is far less convenient than, say,
just being able to call a Dense(123) layer on an input of unknown
width and having the shape inferred at build time. LazyModel
solves this problem by delaying the model build until the first
time it is actually called on a real node in the graph, at which
point an internal Input layer is constructed on the fly (and
generally then not used).
Known issues:
- BatchNormalization layers fail in mode 0 (because they are
called twice). Workaround: use in mode 1 or 2 or outside
LazyModel.
- Layer activity_regularizers do not work well, because then
there end up being two copies (one on the activation resulting
from the internal Input layer). Workaround: use
activity_regularizers only outside the LazyModel.
- There still ends up being a dangling tf.placeholder in the
graph. See notes in exp/model_keras_hacking/ for failed
more elegant solutions.
'''
def __init__(self, model_function):
self._model_function = model_function
self._lazy_has_run = False
# Delay rest of construction until first call
def __call__(self, inputs, mask=None):
if not self._lazy_has_run:
input_was_list_tuple = isinstance(inputs, list) or isinstance(inputs, tuple)
if input_was_list_tuple:
input_list = inputs
else:
input_list = [inputs]
# Make short-lived Input Layers for each x this was called with
input_layers = []
warn_prefix = 'if_you_get_a_must_feed_placeholder_error_here_it_is_because_you_used_an_activity_regularizer._ask_jason'
for inp in input_list:
#ll = Input(tensor=inp, batch_shape=inp._keras_shape, dtype=inp.dtype, name='real_input_from__%s' % inp.name.replace('/','_').replace(':','_'))
#ll = Input(batch_shape=inp.get_shape().as_list(), dtype=inp.dtype, name='%s.%s' % (warn_prefix, inp.name.replace('/','_').replace(':','_')))
shape = full_static_shape(inp)
ll = Input(batch_shape=shape, dtype=inp.dtype, name='%s.%s' % (warn_prefix, inp.name.replace('/','_').replace(':','_')))
input_layers.append(ll)
if not input_was_list_tuple:
input_layers = input_layers[0]
# Call function of inputs to get output tensors
# And then initialize the entire model.
outputs = self._model_function(input_layers)
super(LazyModel, self).__init__(input_layers, outputs)
self._lazy_has_run = True
# Now actually call the model and return the outputs
return super(LazyModel, self).__call__(inputs, mask=mask)
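# Illustrative sketch (hypothetical helper): LazyModel defers building until the first
# call, so the model function can be written without knowing the input shape up front.
def _demo_lazy_model():
    lazy = LazyModel(lambda x: tf.keras.layers.Dense(3)(x))
    features = Input(batch_shape=(5, 7))
    outputs = lazy(features)                      # the underlying Model is built here
    return outputs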
| 12,035 | 39.12 | 192 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/engine_topology.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tensorflow.keras.layers import Input
from keras.engine.topology import Network as Container
from .util import full_static_shape
class LazyContainer(Container):
'''Like Container. But lazy.'''
def __init__(self, container_function, use_method_disposable=True):
self._container_function = container_function
self._lazy_has_run = False
self.use_method_disposable = use_method_disposable
# Delay rest of construction until first call
def __call__(self, x, mask=None):
if not self._lazy_has_run:
# Make short-lived Input Layers for each x this was called with
# TODO: handle tuple or list x
x_shape = full_static_shape(x) # Uses var._keras_shape or var.get_shape()
if self.use_method_disposable:
inp_layer = Input(batch_shape=x_shape,
dtype=x.dtype,
name='tmp_input_from__%s' % x.name.replace('/','_').replace(':','_'))
else:
print('Warning: using non-disposable approach. May not work yet.')
inp_layer = Input(tensor=x,
batch_shape=x_shape,
dtype=x.dtype, name='real_input_from__%s' % x.name.replace('/','_').replace(':','_'))
# Call function of inputs to get output tensors
outputs = self._container_function(inp_layer)
# Initialize entire Container object here (finally)
super(LazyContainer, self).__init__(inp_layer, outputs)
self._lazy_has_run = True
if not self.use_method_disposable:
return outputs
# Non-disposable mode: actually call the Container only the *second* and later times
# Disposable mode: call the Container now
ret = super(LazyContainer, self).__call__(x, mask=mask)
return ret
| 3,056 | 45.318182 | 119 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/layers.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
''' Extended Keras Layers
'''
from .rproj_layers import RProjDense, RProjConv2D, RProjBatchNormalization, RProjLocallyConnected2D
| 1,235 | 48.44 | 99 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/util.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Flatten, Input, Lambda
from src.subspace.general.tfutil import tf_assert_gpu, hist_summaries_traintest
########################
# General Keras helpers
########################
def make_image_input_preproc(im_dims, dtype='float32', flatten_in=False, shift_in=None, name=None):
'''Make an input for images and (optionally preprocess). Returns
both the Input layer (which should be used as Model input) and the
preproc version (which should be passed to the first layer of the
model). If no preprocessing is done, the Input layer and preproc
will be the same.
'''
assert isinstance(im_dims, tuple) and len(im_dims) == 3, 'should be tuple of 3 dims (0,1,c)'
assert dtype in ('float32', 'uint8'), 'unknown dtype'
input_images = Input(shape=im_dims, dtype=dtype, name=name)
preproc_images = input_images
if dtype == 'uint8':
preproc_images = Lambda(lambda x: K.cast(x, 'float32'))(preproc_images)
if shift_in is not None:
print('subtracting from each input:', shift_in)
preproc_images = Lambda(lambda x: x - shift_in)(preproc_images)
if flatten_in:
preproc_images = Flatten()(preproc_images)
return input_images, preproc_images
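# Illustrative sketch (hypothetical helper): typical use is to feed uint8 images, cast
# and shift them once, and hand the preprocessed tensor to the first real layer while
# keeping the raw Input as the Model input.
def _demo_image_input():
    input_images, preproc = make_image_input_preproc((32, 32, 3), dtype='uint8',
                                                     shift_in=128.0, name='demo_images')
    logits = tf.keras.layers.Dense(10)(Flatten()(preproc))
    return tf.keras.Model(inputs=input_images, outputs=logits)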
def make_classlabel_input(n_label_vals):
return Input(batch_shape=(None,), dtype='int64')
def setup_session_and_seeds(seed, assert_gpu=True, mem_fraction=None):
'''Start TF and register session with Keras'''
# Use InteractiveSession instead of Session so the default session will be set
if mem_fraction is not None:
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
sess = tf.compat.v1.InteractiveSession(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
else:
sess = tf.compat.v1.InteractiveSession()
K.set_session(sess)
np.random.seed(seed)
tf.compat.v1.set_random_seed(seed)
print('Set numpy and tensorflow random seeds to: %s' % repr(seed))
print('My PID is %d' % os.getpid())
if assert_gpu:
tf_assert_gpu(sess)
return sess
def add_act_summaries(model, quiet=False):
tensors = []
if not quiet:
print('\nActivations:')
for layer in model.layers:
for node in layer._inbound_nodes:
for tensor in node.output_tensors:
tensors.append(tensor)
tdict = {tt.name: tt for tt in set(tensors)}
for tname in sorted(tdict.keys()):
hist_summaries_traintest(tdict[tname], name=tname + '__act')
if not quiet:
print(' ', tname, tdict[tname])
def get_model_tensors(model, with_layers_nodes=False):
tensor_set = set()
tensor_list = []
layer_list = []
node_list = []
for layer in model.layers:
for node in layer._inbound_nodes:
for tensor in node.output_tensors:
if tensor not in tensor_set:
                    # Make a list with deterministic order, but check membership using a fast set
tensor_set.add(tensor)
tensor_list.append(tensor)
layer_list.append(layer)
node_list.append(node)
if with_layers_nodes:
return tensor_list, layer_list, node_list
else:
return tensor_list
def warn_misaligned_shapes(model):
printed = False
tlns = get_model_tensors(model, with_layers_nodes=True)
for tln in zip(tlns[0], tlns[1], tlns[2]):
tensor, layer, node = tln
tf_shape = tuple(tensor.get_shape().as_list())
try:
keras_shape = tensor._keras_shape
except AttributeError:
continue
if tf_shape != keras_shape:
if not printed:
print('\nWarning: found the following tensor shape mismatches, may indicate problems.')
print(' %-40s %-22s %-22s' % ('LAYER NAME', '', ''))
print(' %-40s %-22s %-22s' % ('TENSOR NAME', 'KERAS SHAPE', 'TF SHAPE'))
printed = True
print(' %-40s %-22s %-22s' % (layer.name, '', ''))
print(' %-40s %-22s %-22s' % (tensor.name, keras_shape, tf_shape))
def full_static_shape(var):
'''Returns the most fully-specified static shape possible for var (at
graph build time, not run time). Uses information in
var.get_shape() as well as var._keras_shape. Raises an Exception
if the two shapes are incompatible with each other.
'''
try:
tf_shape = [val.__int__() for val in var.get_shape()]
except ValueError:
raise Exception('Unclear why this would ever be encountered. If it pops up, debug here.')
if not hasattr(var, '_keras_shape'):
return tf_shape
k_shape = var._keras_shape
assert len(tf_shape) == len(k_shape), 'Shape lengths different; this probably should not occur'
shape = []
for tt, kk in zip(tf_shape, k_shape):
if tt == kk:
shape.append(tt)
else:
if tt is None:
shape.append(kk)
elif kk is None:
shape.append(tt)
else:
raise Exception('tf shape and Keras shape are contradictory: %s vs %s' % (tf_shape, k_shape))
return shape
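# Illustrative check (hypothetical helper): for a tensor whose shape is fully known at
# graph build time, full_static_shape simply returns the integer dimensions.
def _demo_full_static_shape():
    x = tf.zeros((2, 3))
    assert full_static_shape(x) == [2, 3]
    return full_static_shape(x)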
| 6,451 | 38.10303 | 109 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/regularizers.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Custom Keras regularizers.'''
from tensorflow import keras
import tensorflow.keras.backend as K
class WeightRegularizer(keras.regularizers.WeightRegularizer):
'''Subclass of Keras WeightRegularizer that doesn't use
K.in_train_phase, so that total loss can easily be compared
between train and val modes.
'''
def __init__(self, l1=0., l2=0.):
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
self.uses_learning_phase = False
self.p = None
def get_loss(self):
loss = 0.0
if self.l1:
loss += K.sum(K.abs(self.p)) * self.l1
if self.l2:
loss += K.sum(K.square(self.p)) * self.l2
return loss
class WeightRegularizerMean(keras.regularizers.WeightRegularizer):
'''Subclass of Keras WeightRegularizer that doesn't use
K.in_train_phase, so that total loss can easily be compared
between train and val modes.
Uses mean instead of sum above.
'''
def __init__(self, l1=0., l2=0.):
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
self.uses_learning_phase = False
self.p = None
def get_loss(self):
loss = 0.0
if self.l1:
loss += K.mean(K.abs(self.p)) * self.l1
if self.l2:
loss += K.mean(K.square(self.p)) * self.l2
return loss
class ActivityRegularizer(keras.regularizers.ActivityRegularizer):
'''Subclass of Keras ActivityRegularizer that doesn't use
K.in_train_phase, so that total loss can easily be compared
between train and val modes.
'''
def __init__(self, l1=0., l2=0.):
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
self.uses_learning_phase = False
self.layer = None
def get_loss(self):
if self.layer is None:
raise Exception('Need to call `set_layer` on '
'ActivityRegularizer instance '
'before calling the instance.')
loss = 0.0
for i in range(len(self.layer.inbound_nodes)):
output = self.layer.get_output_at(i)
if self.l1:
loss += K.sum(self.l1 * K.abs(output))
if self.l2:
loss += K.sum(self.l2 * K.square(output))
return loss
def l1(l=0.01):
return WeightRegularizer(l1=l)
def l2(l=0.01):
return WeightRegularizer(l2=l)
def l1l2(l1=0.01, l2=0.01):
return WeightRegularizer(l1=l1, l2=l2)
def activity_l1(l=0.01):
return ActivityRegularizer(l1=l)
def activity_l2(l=0.01):
return ActivityRegularizer(l2=l)
def activity_l1l2(l1=0.01, l2=0.01):
return ActivityRegularizer(l1=l1, l2=l2)
| 3,830 | 30.925 | 80 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/__init__.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 1,104 | 51.619048 | 80 |
py
|
fl-analysis
|
fl-analysis-master/src/subspace/keras_ext/rproj_layers.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer, InputSpec
import tensorflow.keras.backend as K
from tensorflow.python.keras.utils import conv_utils
from src.subspace.keras_ext.rproj_layers_util import _convert_string_dtype
# from keras.backend.tensorflow_backend import _convert_string_dtype
from tensorflow.keras import regularizers, constraints, initializers, activations
###########
#
# Low Rank Basis Layers
#
# These layers are modified versions of standard Keras layers that
# accept an OffsetCreator*Proj to create offsets from a weight basis
# in a Dense/Sparse/Fastfood agnostic manner.
#
###########
class LowRankBasisLayer(Layer):
'''Smarter version of Layer...'''
def __init__(self, offset_creator_class, weight_basis, *args, **kwargs):
super(LowRankBasisLayer, self).__init__(*args, **kwargs)
# offset_creator is an object that creates theta offsets
self.offset_creator = offset_creator_class()
self.weight_basis = weight_basis
# These may or may not be used by subclasses
#self._basis_matrices = []
#self._basis_matrix_normalizers = []
# TODO check for use of basis_matrices
@property
def basis_matrices(self):
print('USED HERE basis_matrices')
return self._basis_matrices
# TODO check for use of basis_matrix_normalizers
@property
def basis_matrix_normalizers(self):
print('USED HERE basis_matrix_normalizers')
return self._basis_matrix_normalizers
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None):
'''Version of add_weight that creates a weight theta by instantiating
theta_0 and then adding to it an offset from the member
offset_creator.
'''
initializer = initializers.get(initializer)
if dtype is None:
dtype = K.floatx()
# Create Theta_0
value_0 = initializer(shape)
theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name)
if isinstance(value_0, np.ndarray):
theta_0._keras_shape = value_0.shape
elif hasattr(value_0, 'get_shape'):
theta_0._keras_shape = tuple(map(int, value_0.get_shape()))
theta_0._uses_learning_phase = False
# Call offset creator
        theta_offset_exec = self.offset_creator.create_theta_offset(self.weight_basis,
                                                                    theta_0.get_shape(),
                                                                    dtype=dtype,
                                                                    name=name)
        non_trainable_weights = theta_offset_exec.ww
        # if regularizer is not None:
        #     self.add_loss(regularizer(theta))
        # if constraint is not None:
        #     self.constraints[theta] = constraint
        #self._base_thetas.append(theta_0)
        #self._basis_matrices.append(ww)
        #self._non_trainable_weights.extend([theta_0, ww])
        self._non_trainable_weights.extend([theta_0] + [non_trainable_weights])
        return theta_0, theta_offset_exec
def add_non_trainable_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
constraint=None):
'''Adds a weight variable to the layer.
# Arguments
name: String, the name for the weight variable.
shape: The shape tuple of the weight.
dtype: The dtype of the weight.
initializer: An Initializer instance (callable).
regularizer: An optional Regularizer instance.
constraint: An optional Constraint instance.
# Returns
The created weight variable.
'''
initializer = initializers.get(initializer)
if dtype is None:
dtype = K.floatx()
weight = tf.Variable(initializer(shape), dtype=dtype, name=name, trainable=False)
# weight = K.variable(initializer(shape), dtype=dtype, name=name)
if regularizer is not None:
self.add_loss(regularizer(weight))
if constraint is not None:
self.constraints[weight] = constraint
self._non_trainable_weights.append(weight)
return weight
class RProjDense(LowRankBasisLayer):
'''RProj version of Dense.'''
def __init__(self, offset_creator_class, weight_basis,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs, **kwargs):
kt0, eproj = self.kernel
k = tf.add(kt0, eproj())
bt0, eprojb = self.bias
b = tf.add(bt0, eprojb())
# Normal dense functionality
output = K.dot(inputs, k)
if self.use_bias:
output = K.bias_add(output, b)
if self.activation is not None:
output = self.activation(output)
return output
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
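# Illustrative sketch (hypothetical helper): an RProjDense layer is wired up with an
# offset creator class and a shared ThetaPrime basis; only the basis vector is trainable,
# while the per-layer theta_0 and projection matrices stay frozen.
def _demo_rproj_dense():
    from src.subspace.keras_ext.rproj_layers_util import ThetaPrime, OffsetCreatorDenseProj
    basis = ThetaPrime(16)                                    # shared intrinsic parameters
    layer = RProjDense(OffsetCreatorDenseProj, basis, units=5, activation='relu')
    outputs = layer(tf.zeros((2, 7)))                         # builds kernel/bias offsets
    assert outputs.shape == (2, 5)
    return outputs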
class _RProjConv(LowRankBasisLayer):
'''Abstract nD convolution layer (private, used as implementation base).
    Only the intrinsic (RProj) parameters are trainable.'''
def __init__(self, offset_creator_class, weight_basis,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(_RProjConv, self).__init__(offset_creator_class, weight_basis, **kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
if self.rank == 1:
outputs = K.conv1d(
inputs,
self.kernel,
strides=self.strides[0],
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate[0])
if self.rank == 2:
outputs = K.conv2d(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.rank == 3:
outputs = K.conv3d(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
class RProjConv2D(_RProjConv):
'''Low Rank Basis Conv2D
    `filters` is the number of filters; the output dimension equals `filters`.
TODO: Documentation / unit tests
'''
def __init__(self, offset_creator_class, weight_basis,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(RProjConv2D, self).__init__(
offset_creator_class=offset_creator_class,
weight_basis=weight_basis,
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.input_spec = InputSpec(ndim=4)
def build(self, input_shape):
        assert self.data_format != 'channels_first', 'only channels_last (b01c) is supported'
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[-1]
self.units = self.filters
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
assert self.rank == 2, 'only conv2d supported for now...'
kt0, eproj = self.kernel
k = tf.add(kt0, eproj())
bt0, eprojb = self.bias
b = tf.add(bt0, eprojb())
if self.rank == 2:
outputs = K.conv2d(
inputs,
k,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
b,
data_format=self.data_format)
#if self.activation is not None:
# assert False,'activation functions not supported'
# return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
            # self.filters*2 to accommodate LU representation
return (input_shape[0],) + tuple(new_space) + (self.filters,)
class RProjBatchNormalization(LowRankBasisLayer):
'''RProj version of BatchNormalization.'''
def __init__(self, offset_creator_class, weight_basis,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)
self.supports_masking = True
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(moving_variance_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis) + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape) + '.')
self.input_spec = InputSpec(ndim=len(input_shape),
axes={self.axis: dim})
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.moving_mean = self.add_non_trainable_weight(
shape=shape,
name='moving_mean',
initializer=self.moving_mean_initializer)
self.moving_variance = self.add_non_trainable_weight(
shape=shape,
name='moving_variance',
initializer=self.moving_variance_initializer)
self.built = True
def call(self, inputs, training=None):
# training = self._get_training_value(training)
input_shape = K.int_shape(inputs)
# Prepare broadcasting shape.
ndim = len(input_shape)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
# Determines whether broadcasting is needed.
needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])
# exec in call
gamma_init, gamma_exec = self.gamma
gamma = tf.add(gamma_init, gamma_exec())
beta_init, beta_exec = self.beta
beta = tf.add(beta_init, beta_exec())
def normalize_inference():
if needs_broadcasting:
# In this case we must explicitly broadcast all parameters.
broadcast_moving_mean = K.reshape(self.moving_mean,
broadcast_shape)
broadcast_moving_variance = K.reshape(self.moving_variance,
broadcast_shape)
if self.center:
broadcast_beta = K.reshape(beta, broadcast_shape)
else:
broadcast_beta = None
if self.scale:
broadcast_gamma = K.reshape(gamma,
broadcast_shape)
else:
broadcast_gamma = None
return K.batch_normalization(
inputs,
broadcast_moving_mean,
broadcast_moving_variance,
broadcast_beta,
broadcast_gamma,
epsilon=self.epsilon)
else:
return K.batch_normalization(
inputs,
self.moving_mean,
self.moving_variance,
beta,
gamma,
epsilon=self.epsilon)
# If the learning phase is *static* and set to inference:
# if tf.cond(training, tf.constant(True)):
# if training in {0, False}:
# return normalize_inference()
# If the learning is either dynamic, or set to training:
# print(inputs)
# print(gamma, beta)
normed_training, mean, variance = K.normalize_batch_in_training(
inputs, gamma, beta, reduction_axes,
epsilon=self.epsilon)
self.add_update([K.moving_average_update(self.moving_mean,
mean,
self.momentum),
K.moving_average_update(self.moving_variance,
variance,
self.momentum)],
inputs)
# Pick the normalized form corresponding to the training phase.
return K.in_train_phase(normed_training,
normalize_inference,
training=training)
# def _get_training_value(self, training=None):
# print(training)
# if training is None:
# training = K.learning_phase()
#
# if isinstance(training, int):
# training = bool(training)
# return training
# return training == tf.Tensor()
# if base_layer_utils.is_in_keras_graph():
# training = math_ops.logical_and(training, self._get_trainable_var())
# else:
# training = math_ops.logical_and(training, self.trainable)
# return training
class RProjLocallyConnected2D(LowRankBasisLayer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
"""
# @interfaces.legacy_conv2d_support
def __init__(self, offset_creator_class, weight_basis,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=4)
def build(self, input_shape):
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
' a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
self.kernel_shape = (output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter,
self.filters)
self.kernel = self.add_weight(shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, self.filters)
def call(self, inputs):
_, _, filters = self.kernel_shape
output = K.local_conv2d(inputs,
self.kernel,
self.kernel_size,
self.strides,
(self.output_row, self.output_col),
self.data_format)
if self.use_bias:
if self.data_format == 'channels_first' or self.data_format == 'channels_last':
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
        base_config = super(RProjLocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
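# Illustrative sketch (not part of the original module): the RProj* layers above store each
# parameter as a pair (theta_0, offset_fn), where offset_fn() projects a small trainable
# intrinsic vector back into the full parameter space, and call() uses theta_0 + offset_fn()
# as the effective weight (e.g. k = tf.add(kt0, eproj())). The helper below mimics that
# arithmetic with plain tensors, assuming a dense random projection; all names are hypothetical.
def _example_low_rank_reconstruction():
    import tensorflow as tf
    theta_0 = tf.zeros((784, 10))                # frozen initial kernel
    basis = tf.random.normal((784 * 10, 20))     # fixed random projection basis P
    theta_d = tf.Variable(tf.zeros((20, 1)))     # small trainable intrinsic vector
    offset = tf.reshape(tf.matmul(basis, theta_d), theta_0.shape)
    effective_kernel = tf.add(theta_0, offset)   # same pattern as the layers' call()
    return effective_kernel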
| 33,100 | 41.491656 | 116 |
py
|
fl-analysis
|
fl-analysis-master/src/data/emnist.py
|
import os
import h5py
import tensorflow as tf
import numpy as np
def load_data(only_digits=True, cache_dir=None):
"""Loads the Federated EMNIST dataset.
Downloads and caches the dataset locally. If previously downloaded, tries to
load the dataset from cache.
This dataset is derived from the Leaf repository
(https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
dataset, grouping examples by writer. Details about Leaf were published in
"LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
*Note*: This dataset does not include some additional preprocessing that
MNIST includes, such as size-normalization and centering.
In the Federated EMNIST data, the value of 1.0
corresponds to the background, and 0.0 corresponds to the color of the digits
themselves; this is the *inverse* of some MNIST representations,
e.g. in [tensorflow_datasets]
(https://github.com/tensorflow/datasets/blob/master/docs/datasets.md#mnist),
where 0 corresponds to the background color, and 255 represents the color of
the digit.
Data set sizes:
*only_digits=True*: 3,383 users, 10 label classes
- train: 341,873 examples
- test: 40,832 examples
*only_digits=False*: 3,400 users, 62 label classes
- train: 671,585 examples
- test: 77,483 examples
Rather than holding out specific users, each user's examples are split across
_train_ and _test_ so that all users have at least one example in _train_ and
one example in _test_. Writers that had less than 2 examples are excluded from
the data set.
The `tf.data.Datasets` returned by
`tff.simulation.ClientData.create_tf_dataset_for_client` will yield
`collections.OrderedDict` objects at each iteration, with the following keys
and values:
- `'pixels'`: a `tf.Tensor` with `dtype=tf.float32` and shape [28, 28],
containing the pixels of the handwritten digit, with values in
the range [0.0, 1.0].
- `'label'`: a `tf.Tensor` with `dtype=tf.int32` and shape [1], the class
label of the corresponding pixels. Labels [0-9] correspond to the digits
classes, labels [10-35] correspond to the uppercase classes (e.g., label
11 is 'B'), and labels [36-61] correspond to the lowercase classes
(e.g., label 37 is 'b').
Args:
only_digits: (Optional) whether to only include examples that are from the
digits [0-9] classes. If `False`, includes lower and upper case
characters, for a total of 62 class labels.
cache_dir: (Optional) directory to cache the downloaded file. If `None`,
caches in Keras' default cache directory.
Returns:
Tuple of (train, test) where the tuple elements are
`tff.simulation.ClientData` objects.
"""
if only_digits:
fileprefix = 'fed_emnist_digitsonly'
sha256 = '55333deb8546765427c385710ca5e7301e16f4ed8b60c1dc5ae224b42bd5b14b'
else:
fileprefix = 'fed_emnist'
sha256 = 'fe1ed5a502cea3a952eb105920bff8cffb32836b5173cb18a57a32c3606f3ea0'
filename = fileprefix + '.tar.bz2'
path = tf.keras.utils.get_file(
filename,
origin='https://storage.googleapis.com/tff-datasets-public/' + filename,
file_hash=sha256,
hash_algorithm='sha256',
extract=True,
archive_format='tar',
cache_dir=cache_dir)
dir_path = os.path.dirname(path)
train_client_data = process_h5py(os.path.join(dir_path, fileprefix + '_train.h5'))
test_client_data = process_h5py(os.path.join(dir_path, fileprefix + '_test.h5'))
return train_client_data, test_client_data
def process_h5py(filename):
file = h5py.File(filename, 'r')
drawers = file['examples']
out = []
for i, key in enumerate(drawers.keys()):
out.append({ 'pixels': drawers[key]['pixels'].value, 'label': drawers[key]['label'].value})
return np.asarray(out)
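# Illustrative sketch (not part of the original module): each element returned by
# process_h5py() is a dict with a 'pixels' array of shape (N, 28, 28) and a 'label' array of
# shape (N,) for one writer; a caller might flatten the per-writer data into a single
# training set like this (the helper name is hypothetical):
def _example_flatten_writers(train_client_data):
    xs = np.concatenate([writer['pixels'] for writer in train_client_data], axis=0)
    ys = np.concatenate([writer['label'] for writer in train_client_data], axis=0)
    return xs.astype(np.float32), ys.astype(np.uint8)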
| 3,842 | 43.172414 | 99 |
py
|
fl-analysis
|
fl-analysis-master/src/data/tf_data.py
|
import itertools
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from src.data import image_augmentation
from src.data import emnist
class Dataset:
def __init__(self, x_train, y_train, batch_size=50, x_test=None, y_test=None):
self.batch_size = batch_size
# LIMIT = 5000 # for debugging remove this
# x_train, y_train = x_train[:LIMIT], y_train[:LIMIT]
self.x_train, self.y_train = self.shuffle(x_train, y_train)
self.x_test, self.y_test = x_test, y_test
self.x_aux, self.y_aux, self.mal_aux_labels = None, None, None
self.x_aux_test, self.mal_aux_labels_test = None, None
self.fg = tf.data.Dataset.from_tensor_slices((self.x_train, self.y_train))
def shuffle(self, x, y):
perms = np.random.permutation(x.shape[0])
return x[perms, :], y[perms]
def get_data(self):
"""Creates one batch of data.
# This is a TERRIBLE way to load data... every epoch we get it in the same order !!!
Yields:˚
tuple of two: input data batch and corresponding labels
"""
# count = int(self.x_train.shape[0] / self.batch_size)
# if count == 0:
# yield self.x_train, self.y_train
# # return [(self.x_train, self.y_train)]
# for bid in range(count): # Note: Unsafe if batch_size is small!!!
# batch_x = self.x_train[bid * self.batch_size:(bid + 1) * self.batch_size]
# batch_y = self.y_train[bid * self.batch_size:(bid + 1) * self.batch_size]
#
# yield batch_x, batch_y
# bid = 0
shuffle_size = min(self.x_train.shape[0], 10000)
return self.fg \
.shuffle(shuffle_size) \
.batch(self.batch_size, drop_remainder=True) \
.prefetch(tf.data.experimental.AUTOTUNE)
def get_aux(self, mal_num_batch):
"""Creates one batch of data.
Yields:
tuple of two: input data batch and corresponding labels
"""
if int(self.x_aux.shape[0] / self.batch_size) < 1:
yield self.x_aux, self.mal_aux_labels
for bid in range(int(self.x_aux.shape[0] / self.batch_size)):
batch_x = self.x_aux[bid * self.batch_size:(bid + 1) * self.batch_size]
batch_y = self.mal_aux_labels[bid * self.batch_size:(bid + 1) * self.batch_size]
yield batch_x, batch_y
bid = 0
def get_data_with_aux(self, insert_aux_times, num_batches, pixel_pattern=None, noise_level=None):
"""Creates one batch of data with the AUX data inserted `insert_aux_times` per batch with malicious labels.
:param insert_aux_times number of times aux should be inserted per batch. 1 for Bagdasaryan
:param num_batches number of batches to generate. 200 for Bagdasaryan
:param noise_level sigma of normal distribution noise to add to training samples
Yields:
tuple of two: input data batch and corresponding labels
"""
multiplier = max(float(insert_aux_times) / float(self.mal_aux_labels.shape[0]),
1) # potential multiplier if aux is smaller than insert
number_of_mal_items = int(multiplier * num_batches)
r1 = insert_aux_times
r2 = self.batch_size - insert_aux_times
normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
normal_fg = self.fg \
.repeat(int(math.ceil(normal_mult * self.x_train.shape[0]))) \
.shuffle(self.x_train.shape[0]) \
            .batch(r2, drop_remainder=True)
if insert_aux_times == 0:
return normal_fg
mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
.repeat(number_of_mal_items) \
.shuffle(number_of_mal_items)
if noise_level is not None: # Add noise
mal_fb = mal_fb.map(image_augmentation.add_noise(noise_level))
mal_fb = mal_fb.batch(r1, drop_remainder=True)
zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
(tf.concat((x[0], y[0]), axis=0),
tf.concat((x[1], y[1]), axis=0))
)
result = zipped.unbatch()
return result.batch(self.batch_size, drop_remainder=True) \
.take(num_batches)
def get_aux_test_generator(self, aux_size):
if aux_size == 0:
return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.batch(self.batch_size, drop_remainder=False) \
.prefetch(tf.data.experimental.AUTOTUNE)
return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.repeat(aux_size) \
.batch(self.batch_size, drop_remainder=False) \
.prefetch(tf.data.experimental.AUTOTUNE)
@staticmethod
def keep_samples(x_train, y_train, number_of_samples):
if number_of_samples == -1:
return x_train, y_train
perms = np.random.permutation(number_of_samples)
return x_train[perms, :], y_train[perms]
@staticmethod
def keep_samples_iterative(x_train, y_train, number_of_samples):
if number_of_samples == -1:
return x_train, y_train
perms = [np.random.permutation(min(number_of_samples, val.shape[0])) for val in x_train]
return [val[perm, :] for val, perm in zip(x_train, perms)], \
[val[perm] for val, perm in zip(y_train, perms)]
@staticmethod
def apply_trigger(x_aux):
triggersize = 4
trigger = np.ones((x_aux.shape[0], triggersize, triggersize, 1))
out = x_aux
out[:, 0:triggersize, 0:triggersize, :] = trigger
return out
@staticmethod
def get_mnist_dataset(number_of_samples):
"""MNIST dataset loader"""
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
x_train, x_test = x_train[..., np.newaxis], x_test[..., np.newaxis]
x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
return (x_train, y_train), (x_test, y_test)
@staticmethod
def get_fmnist_dataset(number_of_samples):
"""Fashion MNIST dataset loader"""
fmnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fmnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
x_train, x_test = x_train[..., np.newaxis], x_test[..., np.newaxis]
x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
return (x_train, y_train), (x_test, y_test)
@staticmethod
def get_cifar10_dataset(number_of_samples):
"""Cifar10 dataset loader"""
cifar = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
y_train, y_test = np.squeeze(y_train, axis=1), np.squeeze(y_test, axis=1)
x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
x_test, y_test = Dataset.keep_samples(x_test, y_test, -1) # Note: hardcoded
# Subtract
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
return (x_train, y_train), (x_test, y_test)
@staticmethod
def get_emnist_dataset(number_of_samples, number_of_clients, normalize_mnist_data):
"""nonIID MNIST dataset loader"""
train_dataset, test_dataset = emnist.load_data()
x_train, y_train = np.array([1.0 - np.array(val['pixels']) for val in train_dataset]), \
np.array([np.array(val['label']).astype(np.uint8) for val in train_dataset])
x_test, y_test = np.array([1.0 - np.array(val['pixels']) for val in test_dataset]), \
np.array([np.array(val['label']).astype(np.uint8) for val in test_dataset])
if normalize_mnist_data:
emnist_mean, emnist_std = 0.036910772, 0.16115953
x_train = np.array([(x - emnist_mean) / emnist_std for x in x_train])
x_test = np.array([(x - emnist_mean) / emnist_std for x in x_test])
# Randomly assign clients to buckets but keep them as client
if number_of_clients < x_train.shape[0]:
assignments = np.random.randint(0, number_of_clients, x_train.shape[0], dtype=np.uint16)
new_x_train = []
new_y_train = []
new_x_test = []
new_y_test = []
for i in range(number_of_clients):
new_x_train.append(
np.concatenate(x_train[assignments == i], axis=0)
)
new_y_train.append(
np.concatenate(y_train[assignments == i], axis=0)
)
new_x_test.append(
np.concatenate(x_test[assignments == i], axis=0)
)
new_y_test.append(
np.concatenate(y_test[assignments == i], axis=0)
)
#
# new_x_train = np.concatenate(new_x_train, axis=0)
# new_y_train = np.concatenate(new_y_train, axis=0)
# new_x_test = np.concatenate(new_x_test, axis=0)
# new_y_test = np.concatenate(new_y_test, axis=0)
if number_of_samples == -1:
number_of_samples_per_client = -1
else:
number_of_samples_per_client = int(number_of_samples / float(number_of_clients))
x_train, y_train = Dataset.keep_samples_iterative(new_x_train, new_y_train, number_of_samples_per_client)
x_test, y_test = Dataset.keep_samples_iterative(new_x_test, new_y_test,
min(number_of_samples_per_client, 500))
elif number_of_clients > x_train.shape[0]:
print(f"Number of clients {number_of_clients} is large than amount of EMNIST users {x_train.shape[0]}")
else:
print("Exactly using EMNIST as clients!")
x_train, x_test = [val.astype(np.float32)[..., np.newaxis] for val in x_train], \
[val.astype(np.float32)[..., np.newaxis] for val in x_test]
return (x_train, y_train), (x_test, y_test)
class ImageGeneratorDataset(Dataset):
def __init__(self, x_train, y_train, batch_size=50, x_test=None, y_test=None):
super().__init__(x_train, y_train, batch_size, x_test, y_test)
def get_aux_test_generator(self, aux_size):
if aux_size == 0:
return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.batch(self.batch_size, drop_remainder=False) \
.prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.repeat(aux_size) \
            .batch(self.batch_size, drop_remainder=False)
return test_dataset \
.map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.prefetch(tf.data.experimental.AUTOTUNE)
def get_data(self):
return self.fg\
.shuffle(self.x_train.shape[0]) \
.batch(self.batch_size, drop_remainder=True) \
.map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.prefetch(tf.data.experimental.AUTOTUNE)
def get_aux(self, mal_num_batch):
multiplier = max(float(self.batch_size) / float(self.mal_aux_labels.shape[0]),
1) # potential multiplier if aux is smaller than insert
number_of_mal_items = int(multiplier * mal_num_batch)
return tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
.repeat(number_of_mal_items) \
.batch(self.batch_size, drop_remainder=False) \
            .map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def get_data_with_aux(self, insert_aux_times, num_batches, noise_level=None):
"""Creates one batch of data with the AUX data inserted `insert_aux_times` per batch with malicious labels.
:param insert_aux_times number of times aux should be inserted per batch. 1 for Bagdasaryan
:param num_batches number of batches to generate. 200 for Bagdasaryan
Yields:
tuple of two: input data batch and corresponding labels
"""
# assert self.y_aux != [] and self.x_aux != [] and self.mal_aux_labels != []
multiplier = float(insert_aux_times) / float(self.mal_aux_labels.shape[0]) # potential multiplier if aux is smaller than insert
number_of_mal_items = int(math.ceil(multiplier * num_batches))
r1 = insert_aux_times
r2 = self.batch_size - insert_aux_times
normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
normal_fg = self.fg\
.repeat(int(math.ceil(normal_mult))) \
.shuffle(self.x_train.shape[0]) \
.batch(r2, drop_remainder=True) \
            .map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if insert_aux_times == 0:
print(f"Insert 0 {normal_mult}")
return normal_fg
mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
.repeat(number_of_mal_items) \
.shuffle(number_of_mal_items * self.mal_aux_labels.shape[0])
mal_fb = mal_fb.batch(r1, drop_remainder=True) \
            .map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if noise_level is not None: # Add noise
mal_fb = mal_fb.map(image_augmentation.add_noise_batch(noise_level))
zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
(tf.concat((x[0], y[0]), axis=0), tf.concat((x[1], y[1]), axis=0))
)
result = zipped.unbatch()
return result.batch(self.batch_size, drop_remainder=True)\
.take(num_batches)
class PixelPatternDataset(ImageGeneratorDataset):
def __init__(self, x_train, y_train, target_label, batch_size=50, x_test=None, y_test=None):
super().__init__(x_train, y_train, batch_size, x_test, y_test)
# Assign train set part
(self.x_aux, self.y_aux) = \
(self.x_train, self.y_train)
self.mal_aux_labels = np.repeat(target_label, self.y_aux.shape).astype(np.uint8)
self.pixel_pattern = 'basic'
def get_aux_test_generator(self, aux_size):
if aux_size == 0:
return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.batch(self.batch_size, drop_remainder=False) \
.map(image_augmentation.add_pixel_pattern(self.pixel_pattern)) \
.prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.repeat(aux_size) \
            .batch(self.batch_size, drop_remainder=False)
return test_dataset \
.map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.map(image_augmentation.add_pixel_pattern(self.pixel_pattern)) \
.prefetch(tf.data.experimental.AUTOTUNE)
def get_data_with_aux(self, insert_aux_times, num_batches, noise_level=None):
"""Creates one batch of data with the AUX data inserted `insert_aux_times` per batch with malicious labels.
:param insert_aux_times number of times aux should be inserted per batch. 1 for Bagdasaryan
:param num_batches number of batches to generate. 200 for Bagdasaryan
Yields:
tuple of two: input data batch and corresponding labels
"""
# assert self.y_aux != [] and self.x_aux != [] and self.mal_aux_labels != []
multiplier = float(insert_aux_times) / float(
self.mal_aux_labels.shape[0]) # potential multiplier if aux is smaller than insert
number_of_mal_items = int(math.ceil(multiplier * num_batches))
r1 = insert_aux_times
r2 = self.batch_size - insert_aux_times
normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
normal_fg = self.fg \
.repeat(int(math.ceil(normal_mult))) \
.shuffle(self.x_train.shape[0]) \
.batch(r2, drop_remainder=True) \
.map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if insert_aux_times == 0:
return normal_fg
mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
.repeat(number_of_mal_items) \
.shuffle(number_of_mal_items * self.mal_aux_labels.shape[0])
mal_fb = mal_fb.batch(r1, drop_remainder=True) \
.map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if noise_level is not None: # Add noise
mal_fb = mal_fb.map(image_augmentation.add_noise_batch(noise_level))
mal_fb = mal_fb.map(image_augmentation.add_pixel_pattern(self.pixel_pattern))
zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
(tf.concat((x[0], y[0]), axis=0),
tf.concat((x[1], y[1]), axis=0))
)
result = zipped.unbatch()
return result.batch(self.batch_size, drop_remainder=True)\
.take(num_batches)
class GeneratorDataset(Dataset):
def __init__(self, generator, batch_size):
super().__init__([], [], 0, None, None)
self.generator = generator
self.batch_size = batch_size
def get_data(self):
return self.generator\
.batch(self.batch_size)\
.prefetch(tf.data.experimental.AUTOTUNE)
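# Illustrative sketch (not part of the original module): get_data_with_aux() above poisons a
# fixed number of samples per batch by zipping a malicious dataset batched to r1 samples with
# a benign dataset batched to r2 = batch_size - r1 samples and concatenating the pairs. A
# minimal, self-contained version of that interleaving with toy tensors (names are hypothetical):
def _example_interleave(r1=2, batch_size=8):
    benign = tf.data.Dataset.from_tensor_slices((tf.zeros((100, 4)), tf.zeros((100,), tf.int32)))
    poison = tf.data.Dataset.from_tensor_slices((tf.ones((20, 4)), tf.ones((20,), tf.int32)))
    r2 = batch_size - r1
    zipped = tf.data.Dataset.zip((poison.batch(r1, drop_remainder=True),
                                  benign.batch(r2, drop_remainder=True)))
    mixed = zipped.map(lambda p, b: (tf.concat((p[0], b[0]), axis=0),
                                     tf.concat((p[1], b[1]), axis=0)))
    # every emitted batch now contains exactly r1 poisoned samples
    return mixed.unbatch().batch(batch_size, drop_remainder=True)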
| 18,878 | 42.802784 | 135 |
py
|
fl-analysis
|
fl-analysis-master/src/data/image_augmentation.py
|
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.image import apply_affine_transform
def augment(image,label):
image = tf.image.random_flip_left_right(image)
image = tf.numpy_function(shift, [image], tf.float32)
image = normalize(image)
# debug(image, label)
return image, label
def test_augment(image,label):
return normalize(image), label
def train_aux_augment(image, label):
image = tf.image.random_flip_left_right(image)
image = tf.numpy_function(shift, [image], tf.float32)
# image = tf.add(image, tf.random.normal(tf.shape(image), 0, 0.05))
return image, label
def test_aux_augment(image, label):
"""Augmentation if aux test set is small"""
return augment(image, label) # same as training
def normalize(image):
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
# tf.print("Before:", tf.shape(image), tf.math.reduce_std(image))
# image = tf.image.per_image_standardization(image)
# image = image - tf.reshape(mean, [1, 1, 1, 3])
# image = image / tf.reshape(std, [1, 1, 1, 3])
# tf.print("After:", tf.shape(image), tf.math.reduce_std(image))
return image
def shift(images):
return np.array([shift_single(i) for i in images])
def shift_single(image):
""" Expects numpy, single image """
shape = image.shape
tx = np.random.uniform(-0.1, 0.1) * shape[0]
ty = np.random.uniform(-0.1, 0.1) * shape[1]
image = apply_affine_transform(image, 0,
tx, # tx
ty,
0,
1,
1,
row_axis=0,
col_axis=1,
channel_axis=2,
fill_mode='nearest')
return image
def add_noise_batch(sigma):
def cb(images, labels):
images = images + tf.random.normal(tf.shape(images), mean=0, stddev=sigma)
return images, labels
return cb
def add_pixel_pattern(pixel_pattern):
triggersize = 4
def np_callback(images):
trigger = np.ones((images.shape[0], triggersize, triggersize, images.shape[-1]))
images[:, 0:triggersize, 0:triggersize, :] = trigger
return images
def cb(images, labels):
# shape = tf.shape(images)
# tf.print(shape)
# print(shape)
# trigger = tf.ones((shape[0], triggersize, triggersize, shape[-1]))
# trigger = tf.ones((None, triggersize, triggersize, 3))
# tf.ones_like
# d0 = shape[0]
# tf.print(d0)
# x = tf.constant(tf.float32, shape=[d0, triggersize, triggersize, 3])
# trigger = tf.ones_like(x)
# images[:, 0:triggersize, 0:triggersize, :] = trigger
# this callback is slower i think
images = tf.numpy_function(np_callback, [images], tf.float32)
return images, labels
return cb
def pixel_pattern_if_needed(needed):
def no_op(images, labels):
return images, labels
if needed:
return add_pixel_pattern(None)
else:
return no_op
def debug(image, label):
import matplotlib.pyplot as plt
for i in range(image.shape[0]):
plt.figure()
plt.imshow(image[i] + 0.5)
plt.title(f"Label: {label[i]}")
plt.show()
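# Illustrative sketch (not part of the original module): add_pixel_pattern() returns an
# (images, labels) -> (images, labels) callback intended for batched tf.data pipelines;
# applying it to a toy dataset might look like this (shapes and names are assumptions):
def _example_apply_trigger():
    images = np.zeros((16, 32, 32, 3), dtype=np.float32)
    labels = np.zeros((16,), dtype=np.uint8)
    ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(8)
    return ds.map(add_pixel_pattern('basic'))  # stamps a 4x4 white square in the corner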
| 3,414 | 28.695652 | 88 |
py
|
fl-analysis
|
fl-analysis-master/src/data/data_loader.py
|
from src.attack_dataset_config import AttackDatasetConfig
from src.backdoor.edge_case_attack import EdgeCaseAttack
from src.client_attacks import Attack
from src.data.tf_data import Dataset
from src.data.tf_data_global import GlobalDataset, IIDGlobalDataset, NonIIDGlobalDataset, DirichletDistributionDivider
from src.config.definitions import Config
from src.data.leaf_loader import load_leaf_dataset, process_text_input_indices, process_char_output_indices
import numpy as np
def load_global_dataset(config, malicious_clients, attack_dataset) -> GlobalDataset:
"""Loads dataset according to config parameter, returns GlobalData
:type config: Config
:type malicious_clients: np.array boolean list of clients malicious state
"""
attack_type = Attack(config.client.malicious.attack_type) \
if config.client.malicious is not None else None
dataset: GlobalDataset
if attack_type == Attack.BACKDOOR and attack_dataset.type == 'edge':
        pass  # the dataset is reloaded below in the edge-case branch
else:
(dataset, (x_train, y_train)) = get_dataset(config, attack_dataset)
if attack_type == Attack.BACKDOOR:
attack_ds_config: AttackDatasetConfig = attack_dataset
if attack_ds_config.type == 'semantic':
assert attack_ds_config.train != [] and attack_ds_config.test, \
"Must set train and test for a semantic backdoor!"
# Based on pre-chosen images
build_attack_selected_aux(dataset, x_train, y_train,
attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.target_label,
[], #config['backdoor_feature_benign_regular'],
attack_ds_config.remove_from_benign_dataset, None)
elif attack_ds_config.type == 'semantic_pixel_pattern':
assert attack_ds_config.train != [] and attack_ds_config.test, \
"Must set train and test for a semantic backdoor!"
# Based on pre-chosen images
build_attack_selected_aux(dataset, x_train, y_train,
attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.target_label,
[], #config['backdoor_feature_benign_regular'],
attack_ds_config.remove_from_benign_dataset,
attack_ds_config.trigger_position)
elif attack_ds_config.type == 'tasks':
# Construct 'backdoor tasks'
build_attack_backdoor_tasks(dataset, malicious_clients,
attack_ds_config.tasks,
[attack_ds_config.source_label, attack_ds_config.target_label],
attack_ds_config.aux_samples,
attack_ds_config.augment_times)
elif attack_ds_config.type == 'tasks_pixel_pattern':
build_attack_backdoor_tasks_pixel_pattern(dataset, malicious_clients,
attack_ds_config.tasks,
[attack_ds_config.source_label, attack_ds_config.target_label],
attack_ds_config.aux_samples,
attack_ds_config.augment_times,
attack_ds_config.trigger_position)
elif attack_ds_config.type == 'edge':
assert attack_ds_config.edge_case_type is not None, "Please specify an edge case type"
# We have to reload the dataset adding the benign samples.
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (benign_x, benign_y) =\
build_edge_case_attack(attack_ds_config.edge_case_type, attack_ds_config.edge_case_p,
config.dataset.normalize_mnist_data)
(dataset, (x_t_tst, _)) = get_dataset(config, attack_dataset, benign_x, benign_y)
(dataset.x_aux_train, dataset.mal_aux_labels_train), (dataset.x_aux_test, dataset.mal_aux_labels_test) = \
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test)
elif attack_ds_config.type == 'pixel_pattern':
# do nothing
build_pixel_pattern(dataset, attack_ds_config.target_label)
else:
raise NotImplementedError(f"Backdoor type {attack_ds_config.type} not supported!")
elif attack_type == 'untargeted':
pass
else:
pass # silent fail for now
return dataset
def build_attack_backdoor_tasks(dataset, malicious_clients,
backdoor_tasks, malicious_objective, aux_samples, augment_times):
dataset.build_global_aux(malicious_clients,
backdoor_tasks,
malicious_objective,
aux_samples,
augment_times)
def build_attack_backdoor_tasks_pixel_pattern(dataset, malicious_clients,
backdoor_tasks, malicious_objective, aux_samples, augment_times, trigger_position):
dataset.build_global_aux(malicious_clients,
backdoor_tasks,
malicious_objective,
aux_samples,
augment_times)
def pixel_pattern(images, tp):
triggersize = 4
trigger = np.ones((images.shape[0], triggersize, triggersize, images.shape[-1]))
images[:, tp:(triggersize+tp), tp:(triggersize+tp), :] = trigger
return images
dataset.x_aux_train = pixel_pattern(dataset.x_aux_train, trigger_position)
dataset.x_aux_test = pixel_pattern(dataset.x_aux_test, trigger_position)
def build_attack_selected_aux(ds, x_train, y_train,
backdoor_train_set, backdoor_test_set, backdoor_target,
benign_train_set_extra, remove_malicious_samples, trigger_position):
"""Builds attack based on selected backdoor images"""
(ds.x_aux_train, ds.y_aux_train), (ds.x_aux_test, ds.y_aux_test) = \
(x_train[np.array(backdoor_train_set)],
y_train[np.array(backdoor_train_set)]), \
(x_train[np.array(backdoor_test_set)],
y_train[np.array(backdoor_test_set)])
ds.mal_aux_labels_train = np.repeat(backdoor_target,
ds.y_aux_train.shape).astype(np.uint8)
ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
if benign_train_set_extra:
extra_train_x, extra_train_y = x_train[np.array(benign_train_set_extra)], \
y_train[np.array(benign_train_set_extra)]
ds.x_aux_train = np.concatenate([ds.x_aux_train, extra_train_x])
ds.y_aux_train = np.concatenate([ds.y_aux_train, extra_train_y])
ds.mal_aux_labels_train = np.concatenate([ds.mal_aux_labels_train, extra_train_y])
if trigger_position is not None:
def pixel_pattern(images, tp):
triggersize = 4
# 0.6 because normalization
trigger = np.full((images.shape[0], triggersize, triggersize, images.shape[-1]), 0.6)
trigger[:, :, :, 2] = 0
images[:, tp:(triggersize + tp), tp:(triggersize + tp), :] = trigger
return images
ds.x_aux_train = pixel_pattern(ds.x_aux_train, trigger_position)
ds.x_aux_test = pixel_pattern(ds.x_aux_test, trigger_position)
if remove_malicious_samples:
        # np.delete returns a copy, so the results must be assigned back; both index sets are
        # removed in a single call so the indices stay valid
        remove_inds = np.concatenate([np.asarray(backdoor_train_set), np.asarray(backdoor_test_set)])
        x_train = np.delete(x_train, remove_inds, axis=0)
        y_train = np.delete(y_train, remove_inds, axis=0)
def shuffle(x, y):
perms = np.random.permutation(x.shape[0])
return x[perms, :], y[perms]
def build_edge_case_attack(edge_case, adv_edge_case_p, normalize_mnist_data):
attack: EdgeCaseAttack = factory(edge_case)
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (orig_y_train, _) =\
attack.load()
if normalize_mnist_data:
emnist_mean, emnist_std = 0.036910772, 0.16115953
x_aux_train = (x_aux_train - emnist_mean) / emnist_std
x_aux_test = (x_aux_test - emnist_mean) / emnist_std
# If necessary, distribute edge_case samples by p
    # TODO: orig_y_train is not shuffled together with x_aux_train; this only matters when the original labels differ across samples.
x_aux_train, mal_aux_labels_train = shuffle(x_aux_train, mal_aux_labels_train)
x_aux_test, mal_aux_labels_test = shuffle(x_aux_test, mal_aux_labels_test)
x_benign, y_benign = None, None
if adv_edge_case_p < 1.0:
# Some edge case values must be incorporated into the benign training set.
index = int(adv_edge_case_p * x_aux_train.shape[0])
x_benign, y_benign = x_aux_train[index:, :], orig_y_train[index:]
x_aux_train, mal_aux_labels_train = x_aux_train[:index, :], mal_aux_labels_train[:index]
return (x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (x_benign, y_benign)
# Note: ds.y_aux_train, ds.y_aux_test not set
def build_pixel_pattern(ds, backdoor_target):
# (ds.x_aux_train, ds.y_aux_train), (ds.x_aux_test, ds.y_aux_test) = \
# (ds.x_train, ds.y_train), \
# (ds.x_test, ds. y_test)
# ds.mal_aux_labels_train = np.repeat(backdoor_target,
# ds.y_aux_train.shape).astype(np.uint8)
# ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
# Assign test set
(ds.x_aux_test, ds.y_aux_test) = \
(ds.x_test, ds. y_test)
ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
def factory(classname):
from src.backdoor import edge_case_attack
cls = getattr(edge_case_attack, classname)
return cls()
def get_dataset(config, attack_ds_config, add_x_train=None, add_y_train=None):
"""
@param config:
@param attack_ds_config:
@param add_x_train: x_train samples to add to training set
@param add_y_train: y_train samples to add to training set
@return:
"""
dataset = config.dataset.dataset
number_of_samples = config.dataset.number_of_samples
data_distribution = config.dataset.data_distribution
normalize_mnist_data = config.dataset.normalize_mnist_data # Legacy
num_clients = config.environment.num_clients
if dataset == 'mnist':
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(number_of_samples)
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
if data_distribution == 'IID':
ds = IIDGlobalDataset(x_train, y_train, num_clients=num_clients, x_test=x_test, y_test=y_test)
else:
(x_train_dist, y_train_dist) = \
DirichletDistributionDivider(x_train, y_train, attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.remove_from_benign_dataset,
num_clients).build()
ds = NonIIDGlobalDataset(x_train_dist, y_train_dist, x_test, y_test, num_clients=num_clients)
elif dataset == 'fmnist':
if data_distribution == 'IID':
(x_train, y_train), (x_test, y_test) = Dataset.get_fmnist_dataset(number_of_samples)
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
ds = IIDGlobalDataset(x_train, y_train, num_clients=num_clients, x_test=x_test, y_test=y_test)
else:
raise Exception('Distribution not supported')
elif dataset == 'femnist':
if data_distribution == 'IID':
(x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(number_of_samples,
num_clients,
normalize_mnist_data)
(x_train, y_train), (x_test, y_test) = (
Dataset.keep_samples(np.concatenate(x_train), np.concatenate(y_train), number_of_samples),
Dataset.keep_samples(np.concatenate(x_test), np.concatenate(y_test), number_of_samples))
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
ds = IIDGlobalDataset(x_train, y_train, num_clients, x_test, y_test)
else:
(x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(number_of_samples,
num_clients,
normalize_mnist_data)
if add_x_train is not None:
# Here, x_train and y_train are already separated by handwriter.. Add to random handwriters
handwriter_indices = np.random.choice(len(x_train), add_x_train.shape[0], replace=True)
                for i, index in enumerate(handwriter_indices):
                    # append the i-th extra sample to the randomly chosen handwriter `index`
                    x_train[index] = np.concatenate([x_train[index], add_x_train[i:(i + 1), :]])
                    y_train[index] = np.concatenate([y_train[index], add_y_train[i:(i + 1)]])
for index in range(len(x_train)):
x_train[index], y_train[index] = shuffle(x_train[index], y_train[index])
ds = NonIIDGlobalDataset(x_train, y_train, np.concatenate(x_test), np.concatenate(y_test),
num_clients)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train) # For aux
elif dataset == 'cifar10':
(x_train, y_train), (x_test, y_test) = Dataset.get_cifar10_dataset(number_of_samples)
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
if data_distribution == 'IID':
ds = IIDGlobalDataset(x_train, y_train, num_clients=num_clients, x_test=x_test, y_test=y_test)
else:
if attack_ds_config is not None:
(x_train_dist, y_train_dist) = \
DirichletDistributionDivider(x_train, y_train, attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.remove_from_benign_dataset,
num_clients).build()
else:
(x_train_dist, y_train_dist) = \
DirichletDistributionDivider(x_train, y_train, [],
[],
False,
num_clients).build()
ds = NonIIDGlobalDataset(x_train_dist, y_train_dist, x_test, y_test, num_clients=num_clients)
elif dataset == 'shakespeare':
users, train_data, test_data = load_leaf_dataset("shakespeare")
if data_distribution == "IID":
x_train = [process_text_input_indices(train_data[user]['x']) for user in users]
y_train = [process_char_output_indices(train_data[user]['y']) for user in users]
x_test = np.concatenate([process_text_input_indices(test_data[user]['x']) for user in users])
y_test = np.concatenate([process_char_output_indices(test_data[user]['y']) for user in users])
if num_clients == 1:
x_train = [np.concatenate(x_train)]
y_train = [np.concatenate(y_train)]
ds = NonIIDGlobalDataset(x_train, y_train, x_test, y_test, num_clients=num_clients)
else:
x_train = np.concatenate(x_train)
y_train = np.concatenate(y_train)
ds = IIDGlobalDataset(x_train, y_train, num_clients, x_test, y_test)
else:
if num_clients == len(users):
# selected = np.random.choice(users, num_clients, replace=False)
selected = users
x_train = [process_text_input_indices(train_data[user]['x']) for user in selected]
y_train = [process_char_output_indices(train_data[user]['y']) for user in selected]
x_test = np.concatenate([process_text_input_indices(test_data[user]['x']) for user in selected])
y_test = np.concatenate([process_char_output_indices(test_data[user]['y']) for user in selected])
ds = NonIIDGlobalDataset(x_train, y_train, x_test, y_test, num_clients=num_clients)
else:
raise Exception("Smaller number of users in non-iid not supported!")
else:
raise Exception('Selected dataset with distribution not supported')
return ds, (x_train, y_train)
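# Illustrative sketch (not part of the original module): build_edge_case_attack() keeps the
# first adv_edge_case_p fraction of the shuffled edge-case samples for the adversary and hands
# the remainder, with their original labels, back to the benign training set. The split itself
# reduces to the following (the helper name is hypothetical):
def _example_edge_case_split(x_edge, y_mal, y_orig, adv_edge_case_p):
    index = int(adv_edge_case_p * x_edge.shape[0])
    adv_x, adv_y = x_edge[:index], y_mal[:index]
    benign_x, benign_y = x_edge[index:], y_orig[index:]
    return (adv_x, adv_y), (benign_x, benign_y)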
| 17,418 | 48.768571 | 118 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf_loader.py
|
"""Loads leaf datasets"""
import os
import numpy as np
import pathlib
from src.data.leaf.model_utils import read_data
def load_leaf_dataset(dataset, use_val_set=False):
eval_set = 'test' if not use_val_set else 'val'
base_dir = pathlib.Path(__file__).parent.resolve()
train_data_dir = os.path.join(base_dir, 'leaf', dataset, 'data', 'train')
test_data_dir = os.path.join(base_dir, 'leaf', dataset, 'data', eval_set)
users, groups, train_data, test_data = read_data(train_data_dir, test_data_dir)
return users, train_data, test_data
# ------------------------
# utils for shakespeare dataset
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def word_to_indices(word):
'''returns a list of character indices
Args:
word: string
Return:
indices: int list with length len(word)
'''
indices = []
for c in word:
indices.append(ALL_LETTERS.find(c))
return indices
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
'''returns one-hot representation of given letter
'''
index = ALL_LETTERS.find(letter)
return index
# return _one_hot(index, NUM_LETTERS)
def process_text_input_indices(x_batch: list):
x_batch = [word_to_indices(word) for word in x_batch]
x_batch = np.array(x_batch)
return x_batch
def process_char_output_indices(y_batch: list):
y_batch = [letter_to_vec(c) for c in y_batch]
y_batch = np.array(y_batch, dtype=np.uint8)
return y_batch
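# Illustrative sketch (not part of the original module): for the Shakespeare task each input
# string is encoded as character indices with word_to_indices(), and the target character is
# encoded with letter_to_vec(). A toy round trip (the helper name is hypothetical):
def _example_encode():
    x = word_to_indices("to be or not")  # list of indices into ALL_LETTERS
    y = letter_to_vec(" ")               # index of the next character
    assert all(0 <= i < NUM_LETTERS for i in x)
    return x, y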
| 1,726 | 22.657534 | 98 |
py
|
fl-analysis
|
fl-analysis-master/src/data/ardis.py
|
import os
import h5py
import tensorflow as tf
import numpy as np
def load_data():
path = f"{os.path.dirname(os.path.abspath(__file__))}/ARDIS_7.npy"
(x_train, y_train), (x_test, y_test) = np.load(path, allow_pickle=True)
# Normalize
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = np.moveaxis(x_train, 1, -1), np.moveaxis(x_test, 1, -1)
return (x_train, np.argmax(y_train, axis=1).astype(np.uint8)), (x_test, np.argmax(y_test, axis=1).astype(np.uint8))
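# Illustrative sketch (not part of the original module): load_data() returns channels-last
# image arrays scaled to [0, 1] together with integer class labels, assuming ARDIS_7.npy sits
# next to this module. A caller might inspect the shapes like this (the name is hypothetical):
def _example_usage():
    (x_train, y_train), (x_test, y_test) = load_data()
    return x_train.shape, y_train.shape, x_test.shape, y_test.shape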
| 490 | 27.882353 | 117 |
py
|
fl-analysis
|
fl-analysis-master/src/data/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/data/tf_data_global.py
|
from collections import defaultdict
import numpy as np
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from src.data import image_augmentation
import logging
class GlobalDataset:
"""
    A GlobalDataset represents the dataset as a whole. It serves two purposes:
    - client datasets are derived from it, and
    - it is used to evaluate the global model via `x_test`, `y_test` and the aux sets.
"""
def __init__(self, x_test, y_test):
self.x_test = x_test
self.y_test = y_test
self.x_train = []
self.y_train = []
self.x_aux_train, self.y_aux_train, self.mal_aux_labels_train = \
[], [], []
self.x_aux_test, self.y_aux_test, self.mal_aux_labels_test = \
[], [], []
self.test_generator = tf.data.Dataset.from_tensor_slices((x_test, y_test))
self.aux_test_generator = None
def get_dataset_for_client(self, client_id):
raise Exception("Not implemented")
def get_normal_and_aux_dataset_for_client(self, client_id, aux_sample_size, attack_objective):
raise Exception("Not implemented")
def get_test_batch(self, batch_size, max_num_batches=-1):
"""Creates one batch of test data.
Yields:
tuple of two: input data batch and corresponding labels
"""
# count = min(int(self.x_test.shape[0] / batch_size), max_num_batches)
# for bid in range(count):
# batch_x = self.x_test[bid * batch_size:(bid + 1) * batch_size]
# batch_y = self.y_test[bid * batch_size:(bid + 1) * batch_size]
#
# yield batch_x, batch_y
# bid = 0
# Check here if non cifar?
return self.test_generator.batch(batch_size) \
.take(max_num_batches) \
.map(image_augmentation.test_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.prefetch(tf.data.experimental.AUTOTUNE)
# TODO 0: Do we want to use num_backdoor_tasks or aux_sample_size ?
def build_global_aux(self, mal_clients, num_backdoor_tasks, attack_objective, aux_sample_size, augment_size):
""" Select backdoor tasks """
if np.count_nonzero(mal_clients) == 0:
return # no aux
assert np.count_nonzero(mal_clients) >= num_backdoor_tasks # assert we have less 'tasks' than clients
data_x, data_y = self.x_train, self.y_train
total_x_aux, total_y_aux, total_mal_aux_labels = [], [], []
if aux_sample_size == -1:
aux_sample_size = 10000000 # fix as we reformat this
total_aux_count = 0
num_tasks = 0
for i in range(len(data_x)):
if num_tasks >= num_backdoor_tasks:
break
if total_aux_count >= aux_sample_size:
print(f"Hit limit of {total_aux_count}/{aux_sample_size} samples!")
break
if mal_clients[i]:
x_train_me, y_train_me = data_x[i], data_y[i]
# Pick attack samples
inds = np.where(y_train_me == attack_objective[0])[0] # Find all
logging.debug(f"{i} Found {len(inds)} of class {attack_objective} to poison!")
test_inds = np.ones(x_train_me.shape[0], dtype=bool)
test_inds[inds] = False
x_aux, y_aux = x_train_me[inds], y_train_me[inds]
x_train_me, y_train_me = x_train_me[test_inds], y_train_me[test_inds]
                # relabel the selected samples with the attack target label
mal_labels = np.repeat(attack_objective[1], len(y_aux))
current_aux_count = y_aux.size
if total_aux_count + current_aux_count > aux_sample_size:
# constrain
current_aux_count = aux_sample_size - total_aux_count # how many we have left
x_aux = x_aux[:current_aux_count, :]
y_aux = y_aux[:current_aux_count]
mal_labels = mal_labels[:current_aux_count]
total_x_aux.append(x_aux)
total_y_aux.append(y_aux)
total_mal_aux_labels.append(mal_labels)
data_x[i], data_y[i] = x_train_me, y_train_me
assert not np.any(
                    data_y[i] == attack_objective[0])  # assert data_y doesn't contain any attack label
total_aux_count += current_aux_count
num_tasks += 1
# assert len(total_x_aux) == num_backdoor_tasks # not applicable with aux_sample_size
self.x_aux_train = np.concatenate(total_x_aux)
self.y_aux_train = np.concatenate(total_y_aux)
self.mal_aux_labels_train = np.concatenate(total_mal_aux_labels).astype(np.uint8)
# Assign train as test set for now ... ! Depends on how we want to implement the behavior
self.x_aux_test = self.x_aux_train
self.y_aux_test = self.y_aux_train
self.mal_aux_labels_test = self.mal_aux_labels_train
# self.build_aux_generator(augment_size)
print(f"Got {len(self.x_aux_train)}/{aux_sample_size} samples for {num_backdoor_tasks} tasks!")
# def build_aux_generator(self, augment_size):
# # self.aux_test_generator = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.y_aux_test))
# if augment_size == 0:
# self.aux_test_generator = ImageDataGenerator()
# else:
# self.aux_test_generator = ImageDataGenerator(
# # rotation_range=15,
# horizontal_flip=True,
# width_shift_range=0.1,
# height_shift_range=0.1
# )
# self.aux_test_generator.fit(self.x_aux_test)
def get_aux_generator(self, batch_size, aux_size, augment_cifar, attack_type, max_test_batches):
if aux_size == 0:
test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.batch(batch_size, drop_remainder=False) \
.map(image_augmentation.pixel_pattern_if_needed(attack_type == 'pixel_pattern'), num_parallel_calls=tf.data.experimental.AUTOTUNE)
if max_test_batches is not None:
test_dataset = test_dataset.shuffle(max_test_batches)\
.take(max_test_batches)
return test_dataset \
.prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
.repeat(aux_size) \
.batch(batch_size, drop_remainder=False) \
.map(image_augmentation.pixel_pattern_if_needed(attack_type == 'pixel_pattern'), num_parallel_calls=tf.data.experimental.AUTOTUNE)
if augment_cifar:
return test_dataset\
.map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
.prefetch(tf.data.experimental.AUTOTUNE)
else:
return test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
def get_full_dataset(self, size):
x, y = np.concatenate(self.x_train), np.concatenate(self.y_train)
perms = np.random.choice(x.shape[0], size, replace=False)
x, y = x[perms, :], y[perms]
return x, y
class IIDGlobalDataset(GlobalDataset):
def __init__(self, x_train, y_train, num_clients, x_test, y_test):
super().__init__(x_test, y_test)
self.num_clients = num_clients
x_train, y_train = self.shuffle(x_train, y_train)
# Add to list
for client_id in range(num_clients):
data_samples = int(x_train.shape[0] / self.num_clients)
inds = (client_id * data_samples, (client_id + 1) * data_samples)
x, y = x_train[inds[0]:inds[1]], y_train[inds[0]:inds[1]]
self.x_train.append(x)
self.y_train.append(y)
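        # Example (illustrative numbers): with 50,000 training samples and 100 clients, each
        # client receives a contiguous shard of 500 shuffled samples; any remainder from the
        # integer division is simply dropped.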
def shuffle(self, x, y):
perms = np.random.permutation(x.shape[0])
return x[perms, :], y[perms]
def get_dataset_for_client(self, client_id):
# dataset = tf.data.Dataset.from_tensor_slices((self.x_train[client_id], self.y_train[client_id]))
# return dataset
return self.x_train[client_id], self.y_train[client_id]
class NonIIDGlobalDataset(GlobalDataset):
def __init__(self, x_train, y_train, x_test, y_test, num_clients):
"""Expects x_train to be a list, x_test one array"""
super().__init__(x_test, y_test)
self.x_train, self.y_train = x_train, y_train
def shuffle(self):
raise Exception("Shuffling is not supported on a non-IID dataset!")
def get_dataset_for_client(self, client_id):
return self.x_train[client_id], self.y_train[client_id]
class DirichletDistributionDivider():
"""Divides dataset according to dirichlet distribution"""
def __init__(self, x_train, y_train, train_aux, test_aux, exclude_aux, num_clients):
"""`train_aux` and `test_aux` should be indices for the `train` arrays."""
self.x_train = x_train
self.y_train = y_train
self.train_aux = train_aux
self.test_aux = test_aux
self.exclude_aux = exclude_aux
self.num_clients = num_clients
def build(self):
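        # Concentration parameter of the Dirichlet draw; smaller alpha gives more skewed
        # (less IID) per-client class distributions, larger alpha approaches a uniform split.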
alpha = 0.9
cifar_classes = {}
for ind, x in enumerate(self.y_train):
label = x
if self.exclude_aux and (ind in self.train_aux or ind in self.test_aux):
continue
if label in cifar_classes:
cifar_classes[label].append(ind)
else:
cifar_classes[label] = [ind]
class_size = len(cifar_classes[0])
per_participant_list = defaultdict(list)
no_classes = len(cifar_classes.keys())
for n in range(no_classes):
np.random.shuffle(cifar_classes[n])
sampled_probabilities = class_size * np.random.dirichlet(
np.array(self.num_clients * [alpha]))
for user in range(self.num_clients):
no_imgs = int(round(sampled_probabilities[user]))
sampled_list = cifar_classes[n][:min(len(cifar_classes[n]), no_imgs)]
per_participant_list[user].extend(sampled_list)
cifar_classes[n] = cifar_classes[n][min(len(cifar_classes[n]), no_imgs):]
per_participant_train_x = [self.x_train[ind] for _, ind in per_participant_list.items()]
per_participant_train_y = [self.y_train[ind] for _, ind in per_participant_list.items()]
for n in range(self.num_clients):
perms = np.random.permutation(per_participant_train_x[n].shape[0])
per_participant_train_x[n] = per_participant_train_x[n][perms, :]
per_participant_train_y[n] = per_participant_train_y[n][perms]
return (per_participant_train_x, per_participant_train_y)
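# Illustrative sketch (not part of the original repository): isolates the per-class Dirichlet
# draw used in DirichletDistributionDivider.build above. The function name and default
# arguments below are hypothetical and only serve to show how `alpha` shapes the split.
def _dirichlet_split_demo(num_clients=5, class_size=100, alpha=0.9):
    import numpy as np
    indices = list(range(class_size))
    # One Dirichlet draw decides which fraction of this class each client receives.
    proportions = np.random.dirichlet(np.repeat(alpha, num_clients))
    counts = (proportions * class_size).round().astype(int)
    split, start = [], 0
    for count in counts:
        split.append(indices[start:start + count])
        start += count
    return split  # smaller alpha -> more of the class concentrated on fewer clients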
| 10,891 | 41.054054 | 146 |
py
|
fl-analysis
|
fl-analysis-master/src/data/southwest/__init__.py
|
import pickle
import os
import numpy as np
def load_data():
cifar_mean = np.array([0.5125891, 0.5335556, 0.5198208, 0.51035565, 0.5311504, 0.51707786, 0.51392424, 0.5343016, 0.5199328, 0.51595825, 0.535995, 0.5210931, 0.51837546, 0.5381541, 0.5226226, 0.5209901, 0.5406102, 0.52463686, 0.52302873, 0.5422941, 0.526108, 0.5250642, 0.54408634, 0.52755016, 0.5273931, 0.5461816, 0.5294758, 0.52915096, 0.5475848, 0.53056467, 0.53033257, 0.5487945, 0.53142065, 0.53100014, 0.5493823, 0.532008, 0.5318505, 0.5500815, 0.53262335, 0.53190315, 0.550054, 0.53259194, 0.53190464, 0.5499133, 0.53247046, 0.53181446, 0.54970396, 0.5322407, 0.53149545, 0.54939365, 0.53170216, 0.53139186, 0.5492686, 0.5315995, 0.53134245, 0.5492373, 0.5316772, 0.53092, 0.54887486, 0.5312779, 0.52984685, 0.54790294, 0.53024596, 0.5292543, 0.5475318, 0.53011733, 0.5286725, 0.5470605, 0.5298459, 0.52755964, 0.5462715, 0.5291427, 0.52567685, 0.54489666, 0.52795166, 0.52395415, 0.5434869, 0.52681994, 0.5218709, 0.5416036, 0.52525735, 0.51892465, 0.5388906, 0.52305186, 0.5160373, 0.5363746, 0.5209598, 0.51396006, 0.5345421, 0.5193824, 0.51134723, 0.532196, 0.517654, 0.5104877, 0.53127444, 0.51722926, 0.5101904, 0.5305694, 0.51515365, 0.50723326, 0.52729446, 0.5116333, 0.51065314, 0.53010774, 0.5141287, 0.5120484, 0.5311018, 0.5145084, 0.51433307, 0.5330152, 0.5158267, 0.5169507, 0.5352452, 0.5177454, 0.5190629, 0.5368237, 0.5191274, 0.52094585, 0.5383659, 0.52044463, 0.5231099, 0.5401586, 0.5219691, 0.5250054, 0.54164994, 0.5231126, 0.5265287, 0.5431024, 0.5243246, 0.527122, 0.54356045, 0.52482873, 0.52799946, 0.54437166, 0.5255006, 0.5280943, 0.5443916, 0.5255538, 0.52800775, 0.5441322, 0.52538264, 0.5272869, 0.54332507, 0.52461404, 0.52708703, 0.5431628, 0.5242423, 0.5274033, 0.5433042, 0.52435017, 0.5273274, 0.5431927, 0.5242712, 0.5270461, 0.5430902, 0.52421975, 0.5260983, 0.5423093, 0.52339584, 0.52580154, 0.54231465, 0.52348256, 0.52505565, 0.54183424, 0.5231322, 0.5237481, 0.5409074, 0.522347, 0.5214467, 0.5391383, 0.520837, 0.5197676, 0.5377843, 0.51964325, 0.5174169, 0.5357008, 0.5177166, 0.51449, 0.5332585, 0.51589715, 0.5119284, 0.53136545, 0.5144992, 0.50997263, 0.52995944, 0.5133403, 0.50717854, 0.52768564, 0.5117646, 0.5064974, 0.526998, 0.5115747, 0.5087227, 0.52803206, 0.5107892, 0.5047709, 0.5235213, 0.50595963, 0.50773895, 0.5257103, 0.5077519, 0.50922745, 0.5265793, 0.5080623, 0.5112224, 0.5278405, 0.5087765, 0.51357174, 0.5294538, 0.5100967, 0.51561373, 0.53096634, 0.5113942, 0.5176565, 0.5324904, 0.51281357, 0.5202049, 0.53453314, 0.51450443, 0.5220908, 0.53599066, 0.51567423, 0.523728, 0.5374099, 0.5169516, 0.5246113, 0.53810054, 0.5176903, 0.5249225, 0.53840846, 0.51783085, 0.5250187, 0.5384936, 0.5179714, 0.5246734, 0.53794736, 0.51762265, 0.52400243, 0.5370531, 0.5168342, 0.5241845, 0.5371431, 0.51674104, 0.52449924, 0.5372522, 0.51672924, 0.5242473, 0.53696275, 0.51642936, 0.5235294, 0.5365606, 0.5161019, 0.52258, 0.53588074, 0.5154991, 0.5221798, 0.535868, 0.5153978, 0.52158374, 0.53566223, 0.5151015, 0.52009076, 0.5346052, 0.51411974, 0.5178056, 0.5329729, 0.5127169, 0.5159323, 0.53150475, 0.51149535, 0.5138075, 0.52980405, 0.51005995, 0.5114662, 0.5282267, 0.50894624, 0.50914806, 0.52683425, 0.5080967, 0.50671124, 0.5253311, 0.50703907, 0.50428075, 0.52369565, 0.5061419, 0.50427943, 0.52375823, 0.50676244, 0.50769454, 0.5258301, 0.50687677, 0.50341964, 0.5208474, 0.50148106, 0.50609, 0.5224348, 0.5025353, 0.50786495, 0.5232852, 0.50283676, 0.5093051, 0.52371645, 0.502746, 0.51104456, 0.5243688, 0.5031271, 0.5130025, 0.5254102, 0.50404936, 0.51548815, 0.52718323, 
0.50571525, 0.5182901, 0.5295068, 0.5077093, 0.5202635, 0.53101903, 0.50896144, 0.52161074, 0.5320202, 0.5097878, 0.5223921, 0.5325996, 0.51047987, 0.5221468, 0.5323063, 0.51019615, 0.52245516, 0.5325286, 0.5104237, 0.522466, 0.5323604, 0.51043147, 0.5216972, 0.531244, 0.50954396, 0.5217702, 0.5311202, 0.5091887, 0.52198994, 0.531162, 0.50911826, 0.52152467, 0.5307484, 0.50871134, 0.5208862, 0.53046817, 0.50849295, 0.51973104, 0.52963537, 0.5076733, 0.518899, 0.52925825, 0.5072149, 0.5185742, 0.5293947, 0.5070613, 0.5171974, 0.5285756, 0.5062711, 0.5151835, 0.52734274, 0.5052106, 0.5133427, 0.526205, 0.5043163, 0.51124775, 0.52483165, 0.5032927, 0.5095077, 0.52403444, 0.5028577, 0.50765795, 0.52329034, 0.50265884, 0.5052181, 0.52209514, 0.5019957, 0.50280863, 0.52072275, 0.50135434, 0.50321615, 0.52139026, 0.5026594, 0.5066728, 0.52361757, 0.5029942, 0.5021932, 0.51821595, 0.49708524, 0.50442374, 0.51916325, 0.49741644, 0.5060885, 0.51950955, 0.49727476, 0.5069638, 0.519059, 0.4964266, 0.5087284, 0.51950014, 0.49678195, 0.51071334, 0.5201907, 0.49735886, 0.51288563, 0.5213807, 0.49839514, 0.51536316, 0.5230849, 0.49987316, 0.51695275, 0.5241572, 0.50066316, 0.5187275, 0.52559185, 0.50181437, 0.5197409, 0.5263634, 0.5026216, 0.5193163, 0.5257978, 0.5021233, 0.5194247, 0.5257926, 0.50220615, 0.51956624, 0.52580345, 0.50242853, 0.5187764, 0.52467954, 0.5014981, 0.51859564, 0.524318, 0.5010181, 0.5188952, 0.5245507, 0.5012477, 0.5188869, 0.52466905, 0.50141686, 0.5179967, 0.52408093, 0.5008127, 0.51634544, 0.522779, 0.49937427, 0.51529515, 0.5221451, 0.49863207, 0.5147205, 0.5221899, 0.49849948, 0.5136438, 0.5219193, 0.49821168, 0.512231, 0.52134496, 0.49777523, 0.51080537, 0.5208963, 0.49745238, 0.5089362, 0.52011764, 0.49691004, 0.50721794, 0.5195741, 0.49661744, 0.5056025, 0.51918864, 0.49675563, 0.5035451, 0.51855266, 0.49662897, 0.50160587, 0.51794636, 0.49665856, 0.5021448, 0.5190644, 0.49849382, 0.50551444, 0.5212539, 0.49905226, 0.500901, 0.515494, 0.49274132, 0.50242263, 0.5154062, 0.49202266, 0.50367904, 0.51508915, 0.4912601, 0.50437975, 0.5142034, 0.49006906, 0.5060234, 0.5143099, 0.49016085, 0.5077508, 0.51451904, 0.4902386, 0.50932765, 0.51468265, 0.4902106, 0.51161623, 0.51585156, 0.49129432, 0.51345116, 0.51689655, 0.4920814, 0.5153487, 0.5183447, 0.49324933, 0.51650304, 0.5192187, 0.49404824, 0.5158884, 0.51828694, 0.49310726, 0.5160163, 0.51822007, 0.4932475, 0.5161736, 0.5182637, 0.49356017, 0.51546353, 0.51731044, 0.4927994, 0.5151736, 0.5170156, 0.4923346, 0.5157531, 0.517689, 0.49301156, 0.51606673, 0.5180764, 0.49351144, 0.5152466, 0.5174159, 0.4927852, 0.5140167, 0.5166419, 0.49183553, 0.51290643, 0.5161224, 0.49113527, 0.5120536, 0.51620877, 0.49119917, 0.5106604, 0.51582474, 0.49097508, 0.5096143, 0.5157343, 0.49093577, 0.5088257, 0.51619846, 0.49136695, 0.5070477, 0.51582706, 0.49112934, 0.5051287, 0.51543283, 0.4907929, 0.5036252, 0.51533824, 0.49112743, 0.5017869, 0.5151173, 0.49134406, 0.5001115, 0.51498383, 0.49185488, 0.50085896, 0.51651156, 0.49407417, 0.5038685, 0.51837873, 0.49433786, 0.49883226, 0.5121282, 0.48754528, 0.5000758, 0.5114122, 0.4863624, 0.5008112, 0.5102833, 0.4848477, 0.5017778, 0.50946885, 0.48368028, 0.50302577, 0.5089437, 0.48305368, 0.50465435, 0.50862306, 0.48278242, 0.5060686, 0.50833166, 0.4824641, 0.5082394, 0.5090778, 0.48314664, 0.51039714, 0.50986594, 0.48389146, 0.5121611, 0.51069266, 0.484579, 0.51295584, 0.51114124, 0.4849066, 0.5123309, 0.51033, 0.48397908, 0.51237476, 0.51005167, 0.48390156, 0.5126856, 0.5102092, 0.48433435, 
0.51228243, 0.50971967, 0.48407215, 0.5118439, 0.5095021, 0.4836986, 0.51235306, 0.51017654, 0.48434585, 0.5126779, 0.5105838, 0.4847964, 0.5120654, 0.5102172, 0.48434183, 0.51134646, 0.51007485, 0.483996, 0.510205, 0.50959635, 0.4832043, 0.5089302, 0.5094197, 0.48296732, 0.50774676, 0.5095382, 0.48338902, 0.50677013, 0.5098224, 0.48369464, 0.50615335, 0.51083356, 0.48462817, 0.50475997, 0.5111398, 0.48498636, 0.5028428, 0.51094466, 0.48488927, 0.5013838, 0.5112705, 0.4854046, 0.49967837, 0.5114891, 0.48588443, 0.4981652, 0.511602, 0.48674685, 0.4992814, 0.5136052, 0.4894055, 0.50194436, 0.51530254, 0.4893368, 0.49646285, 0.50839293, 0.48204112, 0.49751297, 0.50725454, 0.48049524, 0.4980085, 0.50569284, 0.47851276, 0.49864239, 0.5044038, 0.4768957, 0.5003046, 0.5039402, 0.47636697, 0.5018272, 0.50317806, 0.4758802, 0.5025877, 0.5020061, 0.47498694, 0.5044948, 0.5020804, 0.4750632, 0.50692207, 0.50274223, 0.47567427, 0.50856435, 0.5031034, 0.47595063, 0.50887084, 0.5028764, 0.47573155, 0.50831467, 0.50201786, 0.474696, 0.50878197, 0.5020216, 0.47488257, 0.5092103, 0.5021663, 0.4753759, 0.5088528, 0.501753, 0.47515774, 0.5090174, 0.5022259, 0.47556934, 0.50975955, 0.50308096, 0.47625992, 0.5098739, 0.50340456, 0.47644824, 0.50900525, 0.5029853, 0.4759902, 0.50806814, 0.50276726, 0.47558382, 0.5074465, 0.50283927, 0.47528836, 0.5062712, 0.5029108, 0.4752889, 0.50548434, 0.5036701, 0.4762001, 0.50404775, 0.5039275, 0.4764725, 0.50313514, 0.5049122, 0.47745422, 0.5022928, 0.5060837, 0.47858214, 0.50063115, 0.50638074, 0.47893777, 0.49917746, 0.50693405, 0.47952414, 0.49736926, 0.50737053, 0.48005804, 0.49621025, 0.5079847, 0.48137575, 0.4975825, 0.51045513, 0.48442462, 0.49982464, 0.51189977, 0.48419452, 0.49396628, 0.5043872, 0.47629082, 0.49500015, 0.50310415, 0.47462815, 0.4952585, 0.50118107, 0.47242218, 0.49591368, 0.49979934, 0.4707737, 0.49758473, 0.49904215, 0.46989417, 0.49899518, 0.49794295, 0.46906137, 0.49925107, 0.49597722, 0.46748066, 0.5008285, 0.49532843, 0.4670364, 0.50324535, 0.49585804, 0.467465, 0.50472397, 0.49579772, 0.46731308, 0.5053876, 0.49565825, 0.46742582, 0.5056196, 0.49522242, 0.4670161, 0.5063051, 0.4951073, 0.46714735, 0.5068745, 0.49522048, 0.46752492, 0.5073496, 0.49560735, 0.46799144, 0.507885, 0.49643242, 0.4688345, 0.50824773, 0.49709204, 0.46944156, 0.5082332, 0.4975203, 0.4696642, 0.5075482, 0.49744686, 0.4694545, 0.5059482, 0.4967136, 0.46864048, 0.50503707, 0.49667683, 0.46843484, 0.5044864, 0.4975461, 0.46908566, 0.50388604, 0.49857575, 0.46996602, 0.5026681, 0.49931762, 0.4705422, 0.50154334, 0.50041497, 0.47160476, 0.5003431, 0.5015314, 0.47257248, 0.49897608, 0.5024307, 0.4734876, 0.4971384, 0.50264883, 0.47366676, 0.49519473, 0.50315595, 0.47422406, 0.49401098, 0.50414294, 0.47581944, 0.4957519, 0.5071924, 0.47934458, 0.49893367, 0.5095179, 0.48008975, 0.4928944, 0.5017452, 0.4717997, 0.49338293, 0.49989566, 0.4696745, 0.49354276, 0.4976462, 0.46727362, 0.49438575, 0.49627277, 0.46561894, 0.49573293, 0.49502394, 0.46427327, 0.49726063, 0.49377844, 0.4633081, 0.49748582, 0.49128968, 0.46113962, 0.4989005, 0.49013296, 0.46027887, 0.5010044, 0.49008757, 0.46031466, 0.5025262, 0.48988083, 0.46005243, 0.50376666, 0.48998192, 0.46057153, 0.5048968, 0.49004027, 0.46088323, 0.50567216, 0.4897594, 0.4608909, 0.50619817, 0.48958594, 0.4609467, 0.5066509, 0.4898532, 0.46136156, 0.50730014, 0.49089676, 0.462447, 0.50729716, 0.49139687, 0.4630397, 0.50767636, 0.4923075, 0.4638307, 0.50731945, 0.49261403, 0.46379933, 0.505517, 0.49204, 0.46285215, 0.5039381, 
0.4918244, 0.4623946, 0.50265783, 0.4921657, 0.46253702, 0.50201833, 0.49335298, 0.46347064, 0.5012656, 0.49502248, 0.4648494, 0.500538, 0.4967403, 0.46637148, 0.49912858, 0.49781212, 0.46717298, 0.4977372, 0.49885482, 0.46822107, 0.49562797, 0.49895254, 0.46833354, 0.49358562, 0.49967587, 0.46917835, 0.49239615, 0.5010549, 0.47097722, 0.49459463, 0.504631, 0.475064, 0.4979264, 0.50716776, 0.4759557, 0.4918032, 0.49916625, 0.46748352, 0.49213278, 0.49701986, 0.46518925, 0.492414, 0.49463385, 0.4625616, 0.4932275, 0.49297503, 0.46055982, 0.49494243, 0.49193472, 0.45942247, 0.49634242, 0.4903976, 0.45815954, 0.49722457, 0.4882345, 0.4563171, 0.49828753, 0.48629057, 0.45481145, 0.49942532, 0.48483828, 0.45377147, 0.5017263, 0.48501387, 0.45419106, 0.50325686, 0.4850058, 0.4545724, 0.5039892, 0.48445266, 0.4544924, 0.5051761, 0.48443985, 0.45475495, 0.5060425, 0.48451993, 0.45506874, 0.50670314, 0.4848033, 0.4554729, 0.5068166, 0.48527336, 0.45600176, 0.5070288, 0.48594874, 0.45693257, 0.5075, 0.48708707, 0.4578876, 0.5069521, 0.4875365, 0.45782277, 0.5054159, 0.48749712, 0.45726743, 0.5035466, 0.4874107, 0.4567758, 0.5017038, 0.4874609, 0.45657945, 0.50083023, 0.48875546, 0.45755926, 0.49977908, 0.49037874, 0.45895723, 0.49890742, 0.49226987, 0.46070153, 0.49780706, 0.4937254, 0.46175405, 0.4959546, 0.494491, 0.4624494, 0.4938109, 0.49503827, 0.46304774, 0.49181435, 0.49606878, 0.4641168, 0.49075583, 0.4977087, 0.46593586, 0.493292, 0.5019984, 0.47069708, 0.4967748, 0.50465685, 0.4717481, 0.49010172, 0.49596816, 0.4626404, 0.49042368, 0.49353567, 0.46001673, 0.49080706, 0.49099243, 0.4572523, 0.4921259, 0.4895786, 0.45560113, 0.49457067, 0.4889396, 0.45486704, 0.4959915, 0.4872486, 0.45347446, 0.49660495, 0.4847584, 0.45158485, 0.49721727, 0.48199192, 0.4493901, 0.49842682, 0.48014995, 0.4480981, 0.5010955, 0.48024452, 0.44856983, 0.5029377, 0.4802045, 0.44899738, 0.5042913, 0.48013192, 0.44936687, 0.5058132, 0.48020938, 0.4498175, 0.5067236, 0.4802015, 0.44999287, 0.5077323, 0.48068598, 0.45060402, 0.5077365, 0.48096684, 0.45091125, 0.5077012, 0.48149258, 0.4515584, 0.5078106, 0.4825217, 0.4524005, 0.506605, 0.48267448, 0.45216075, 0.50480986, 0.48264015, 0.45152882, 0.50299495, 0.48267096, 0.45105693, 0.50172853, 0.48341462, 0.45151478, 0.50032693, 0.48467547, 0.45240417, 0.49895766, 0.4861675, 0.4536566, 0.4976245, 0.4877157, 0.4549862, 0.49636078, 0.4892411, 0.4560303, 0.49428403, 0.49022096, 0.45697924, 0.49180847, 0.4909277, 0.45778778, 0.48999724, 0.49230546, 0.45898786, 0.4893065, 0.494253, 0.46093097, 0.49204266, 0.4991796, 0.46630606, 0.49550968, 0.50181705, 0.46721423, 0.48863742, 0.49280503, 0.4579277, 0.48859853, 0.4897394, 0.45469096, 0.48882475, 0.48680142, 0.4516573, 0.49046448, 0.48538777, 0.4501436, 0.4935391, 0.48516476, 0.44997358, 0.4955372, 0.48386702, 0.44903612, 0.49636477, 0.48131943, 0.44729123, 0.49690264, 0.47835723, 0.44494087, 0.49877423, 0.47686034, 0.4438899, 0.50176084, 0.4768353, 0.44408223, 0.5042957, 0.477343, 0.44499835, 0.50582796, 0.47715458, 0.44520703, 0.5071629, 0.47681445, 0.44540218, 0.5081276, 0.47671828, 0.44564548, 0.50898445, 0.4769926, 0.44604456, 0.50934464, 0.47755072, 0.44655782, 0.50911057, 0.4777294, 0.44670698, 0.50882787, 0.47842446, 0.4472453, 0.5075528, 0.47873765, 0.44718543, 0.5050821, 0.4781854, 0.4461337, 0.5033845, 0.47834134, 0.44580433, 0.5022064, 0.47943324, 0.44647598, 0.5007013, 0.4810909, 0.44763651, 0.49922243, 0.4827641, 0.44886023, 0.49722716, 0.4839076, 0.4496609, 0.49541944, 0.48530293, 0.45072067, 0.49316433, 
0.48645008, 0.45187888, 0.49026492, 0.48719046, 0.4528157, 0.48836845, 0.48866087, 0.4540422, 0.4882927, 0.49136755, 0.45654684, 0.4910425, 0.49652937, 0.46207708, 0.49370152, 0.49863848, 0.4624133, 0.48657668, 0.48924345, 0.45284316, 0.4867835, 0.4861309, 0.44971207, 0.4876669, 0.4834556, 0.4470639, 0.48964882, 0.4820599, 0.44565332, 0.4928811, 0.48187423, 0.44568536, 0.49492255, 0.48032874, 0.44460174, 0.49619076, 0.4779162, 0.44288433, 0.49764556, 0.47569376, 0.4412587, 0.50030124, 0.4746849, 0.4408077, 0.5034509, 0.47453582, 0.44089746, 0.5063687, 0.47512567, 0.4417419, 0.5077552, 0.47457618, 0.4414889, 0.5086126, 0.47358114, 0.44109887, 0.50960577, 0.4734714, 0.4413365, 0.5101132, 0.47348964, 0.44145644, 0.5102535, 0.47361833, 0.44143867, 0.5103364, 0.47384787, 0.4415993, 0.5099839, 0.4744189, 0.44202864, 0.5087035, 0.4747229, 0.44187722, 0.5069964, 0.47504497, 0.44177803, 0.5050453, 0.47550285, 0.44180676, 0.5029002, 0.47600633, 0.44186854, 0.5012443, 0.4777499, 0.44327393, 0.49890348, 0.47885805, 0.44391027, 0.49647012, 0.47983184, 0.44441485, 0.4944723, 0.48138154, 0.44565898, 0.49129403, 0.48196852, 0.4461107, 0.4883029, 0.48287582, 0.44705695, 0.48648065, 0.48472503, 0.44855005, 0.48675364, 0.48811013, 0.4517, 0.4898694, 0.49387822, 0.4576936, 0.49267325, 0.4965016, 0.45861888, 0.48472813, 0.48607555, 0.44821176, 0.4851703, 0.4828152, 0.44512334, 0.48680007, 0.48050714, 0.44295296, 0.48943, 0.47946402, 0.44205004, 0.4921082, 0.47849604, 0.44130418, 0.49403518, 0.47665903, 0.43996602, 0.49607074, 0.4748696, 0.43872422, 0.49874878, 0.47363934, 0.4379633, 0.50116795, 0.47220817, 0.4371685, 0.5036451, 0.4712333, 0.43659544, 0.5060372, 0.4710541, 0.43675196, 0.5080405, 0.47097656, 0.436905, 0.50906533, 0.4701898, 0.4366737, 0.5095589, 0.46971977, 0.43645054, 0.5095605, 0.46912843, 0.43589434, 0.5096588, 0.46888188, 0.43547544, 0.50995463, 0.46909863, 0.43539128, 0.50998944, 0.47021738, 0.43626815, 0.50925773, 0.4711538, 0.43680277, 0.50770724, 0.47178468, 0.4370732, 0.50496185, 0.47179237, 0.43667352, 0.50200653, 0.4718798, 0.43640015, 0.4993973, 0.47282925, 0.43727824, 0.49696958, 0.47378168, 0.43797952, 0.49442086, 0.47489202, 0.43858007, 0.49180892, 0.4760893, 0.4394226, 0.48863047, 0.4769292, 0.43997362, 0.48606238, 0.478356, 0.4412558, 0.48466715, 0.48089924, 0.44332278, 0.4849401, 0.4846041, 0.44666097, 0.48882532, 0.4914228, 0.4535407, 0.49268207, 0.4952098, 0.45585966, 0.48433807, 0.4841154, 0.4449786, 0.48421884, 0.48015177, 0.44136867, 0.48533133, 0.47725466, 0.43870497, 0.48759267, 0.47559616, 0.43719527, 0.4899907, 0.47400004, 0.43588513, 0.49257335, 0.4728307, 0.4350741, 0.49549356, 0.471993, 0.43457165, 0.49732798, 0.46990857, 0.4329577, 0.4993391, 0.4679355, 0.43147355, 0.50131243, 0.46622264, 0.43016183, 0.5035003, 0.46563327, 0.43004262, 0.50586057, 0.46594933, 0.43063706, 0.50671315, 0.46505433, 0.43027684, 0.50720286, 0.4644783, 0.43007416, 0.5065652, 0.4632923, 0.42896625, 0.50674206, 0.463173, 0.42846307, 0.5072345, 0.4635861, 0.42852175, 0.50755554, 0.46496657, 0.4294866, 0.5070834, 0.46635282, 0.43042985, 0.50499576, 0.466583, 0.43030617, 0.5020161, 0.46619397, 0.42961118, 0.49960917, 0.46692374, 0.43010715, 0.49671757, 0.46757874, 0.43065038, 0.494029, 0.4684111, 0.43146124, 0.4915086, 0.46969151, 0.4324231, 0.48884016, 0.47106746, 0.43367115, 0.48577976, 0.47217187, 0.43445578, 0.48384765, 0.4743089, 0.4362553, 0.48255178, 0.47712544, 0.43854022, 0.4827859, 0.48104542, 0.4419203, 0.4875408, 0.4889175, 0.44980296, 0.49222857, 0.49362954, 0.4531146, 0.48345134, 
0.48189873, 0.44176653, 0.48254034, 0.47699937, 0.4373796, 0.4828492, 0.47326526, 0.43395284, 0.48428833, 0.47077665, 0.43156943, 0.48657024, 0.46893388, 0.42990613, 0.4895016, 0.46800038, 0.42916775, 0.49225304, 0.4670462, 0.4283955, 0.4933782, 0.46426156, 0.42603403, 0.49568698, 0.46265113, 0.4248805, 0.49803782, 0.4613017, 0.4239555, 0.49997136, 0.46038866, 0.4236022, 0.50222784, 0.46056995, 0.42400566, 0.5032506, 0.45997098, 0.42384174, 0.50299764, 0.45844653, 0.4227519, 0.50191563, 0.4568102, 0.4212024, 0.50173944, 0.45661685, 0.4206811, 0.50181127, 0.45671672, 0.42050356, 0.5017804, 0.45769, 0.42103347, 0.50113153, 0.45878524, 0.42163467, 0.4995472, 0.45958441, 0.42210057, 0.49701414, 0.45966932, 0.42187333, 0.4948802, 0.46053046, 0.42259118, 0.49240312, 0.46150187, 0.42342836, 0.48942497, 0.46234503, 0.42430562, 0.4871664, 0.4639641, 0.42587113, 0.48441172, 0.4653442, 0.42727435, 0.48171046, 0.46682847, 0.42855245, 0.48050335, 0.46968222, 0.4309223, 0.47995222, 0.4733025, 0.4338385, 0.4811531, 0.47816625, 0.4381573, 0.4867816, 0.48707005, 0.44702503, 0.49141234, 0.49194038, 0.45037937, 0.48170608, 0.47924593, 0.43815848, 0.47971013, 0.47332907, 0.43277887, 0.47890407, 0.46855375, 0.42848718, 0.48002294, 0.4656702, 0.4257275, 0.48198983, 0.46358782, 0.4237998, 0.48515794, 0.46271095, 0.42309296, 0.48783034, 0.4617173, 0.42214933, 0.48901296, 0.45926648, 0.41988516, 0.49089366, 0.45746103, 0.41848654, 0.49287525, 0.45598665, 0.41764778, 0.49450523, 0.45486867, 0.4170744, 0.4964473, 0.45471716, 0.41693056, 0.4979029, 0.45449352, 0.41694745, 0.49695715, 0.45247337, 0.4152949, 0.49592242, 0.4508635, 0.4137969, 0.49496156, 0.44978628, 0.4124712, 0.494225, 0.44924206, 0.4118363, 0.4940134, 0.45002058, 0.4123555, 0.49361187, 0.4513441, 0.41334206, 0.49261862, 0.45272407, 0.41449594, 0.4910123, 0.4537939, 0.4151827, 0.4891517, 0.45477542, 0.4160118, 0.4870745, 0.45586804, 0.4169564, 0.48419368, 0.45671332, 0.4176718, 0.48176137, 0.45807993, 0.41909894, 0.47938812, 0.45961028, 0.42068803, 0.47727743, 0.4615559, 0.42247027, 0.47666398, 0.46509635, 0.42561924, 0.47699735, 0.4695791, 0.42939344, 0.47910586, 0.47532177, 0.43453637, 0.48533195, 0.48489404, 0.4440362, 0.4909336, 0.49069473, 0.44812757, 0.4800212, 0.47689426, 0.4350016, 0.47652468, 0.46948406, 0.4282753, 0.4748533, 0.46400884, 0.42318982, 0.4753451, 0.4606548, 0.41995788, 0.4770718, 0.45832232, 0.41774407, 0.47998485, 0.4573137, 0.41691777, 0.4826587, 0.45660785, 0.41644394, 0.48434055, 0.45490175, 0.4147834, 0.4857314, 0.45285335, 0.4131834, 0.48699182, 0.45115927, 0.41208753, 0.4885743, 0.45024708, 0.41164368, 0.48958632, 0.44931144, 0.41066742, 0.4908836, 0.44885212, 0.41009766, 0.490033, 0.4469305, 0.40853015, 0.48908585, 0.44562486, 0.40742284, 0.4880996, 0.4444442, 0.40593874, 0.48719195, 0.4437357, 0.40511867, 0.4868416, 0.44438827, 0.40560266, 0.48649922, 0.4457055, 0.40672186, 0.48552075, 0.4469248, 0.40770343, 0.48465618, 0.4485857, 0.4090899, 0.48288673, 0.44959667, 0.40996966, 0.48128062, 0.45089018, 0.41112146, 0.47870058, 0.45166087, 0.41180938, 0.4761865, 0.45259836, 0.41288528, 0.47404155, 0.4540715, 0.41436234, 0.47269, 0.45650285, 0.4165419, 0.47286126, 0.4606648, 0.42024902, 0.47428715, 0.46630406, 0.42518413, 0.47749433, 0.47324675, 0.4316184, 0.48468167, 0.4836629, 0.4418336, 0.49038738, 0.4894758, 0.44599137, 0.4786051, 0.4748186, 0.43217805, 0.4739541, 0.46643615, 0.424547, 0.47161707, 0.46041438, 0.41872394, 0.47088465, 0.45603442, 0.41448262, 0.47172025, 0.45304585, 0.4116466, 0.47422644, 0.45193648, 
0.41083297, 0.47647485, 0.45104218, 0.4102996, 0.47871363, 0.45009074, 0.40941817, 0.47970176, 0.44806504, 0.40758622, 0.48077902, 0.44673073, 0.4066285, 0.48239556, 0.4461381, 0.406507, 0.48310184, 0.44495335, 0.40536308, 0.48341256, 0.44372135, 0.4040446, 0.4827836, 0.44205242, 0.40253726, 0.48210746, 0.44118226, 0.40183714, 0.4814093, 0.44043845, 0.40085673, 0.48094538, 0.44009113, 0.4003602, 0.4807507, 0.44092187, 0.40108305, 0.48013055, 0.44185352, 0.4018946, 0.47915024, 0.44284657, 0.402518, 0.4784955, 0.44449207, 0.40387926, 0.47708333, 0.44557968, 0.40490234, 0.47542992, 0.44651788, 0.405837, 0.47325695, 0.44724196, 0.40666974, 0.47111505, 0.4484366, 0.408051, 0.46916032, 0.44997087, 0.40946445, 0.4687677, 0.4528788, 0.4120764, 0.46996024, 0.4576584, 0.41642952, 0.47214544, 0.4639683, 0.42203125, 0.47641894, 0.4718391, 0.42949817, 0.48446226, 0.48280293, 0.44016317, 0.4904502, 0.4889342, 0.44461867, 0.47747833, 0.4732471, 0.4297045, 0.47209415, 0.46440262, 0.4217454, 0.46893883, 0.4577043, 0.41530457, 0.46747503, 0.4528644, 0.41063258, 0.46705288, 0.44910118, 0.40710685, 0.4689187, 0.4476753, 0.40596816, 0.4710917, 0.44706544, 0.4054308, 0.47322798, 0.44637996, 0.40480036, 0.47401327, 0.44458035, 0.40312898, 0.47440195, 0.44284812, 0.4015886, 0.4755172, 0.44188988, 0.40109208, 0.47648847, 0.44119617, 0.40067655, 0.47685918, 0.4402483, 0.39973855, 0.47609216, 0.4386831, 0.3982288, 0.47522417, 0.43767172, 0.39710727, 0.47519797, 0.43756765, 0.3967371, 0.47549462, 0.43796265, 0.39704254, 0.47474524, 0.43806058, 0.39721614, 0.474436, 0.43934363, 0.39846614, 0.4740963, 0.4407958, 0.39963016, 0.4731836, 0.44178876, 0.40022632, 0.47166526, 0.44230458, 0.40058738, 0.47036082, 0.4431542, 0.4014316, 0.46896273, 0.44433352, 0.4027738, 0.46788573, 0.44630224, 0.40493828, 0.46593282, 0.44764116, 0.40617445, 0.46581706, 0.45055774, 0.40885815, 0.467148, 0.4552901, 0.413343, 0.47015914, 0.46221632, 0.41955948, 0.47590438, 0.47117254, 0.42809808, 0.48453513, 0.48240784, 0.43900448, 0.4907998, 0.48875153, 0.44391593, 0.4766856, 0.47221425, 0.4280176, 0.47078082, 0.46314657, 0.41983324, 0.46716425, 0.45636868, 0.41335848, 0.46501, 0.45111534, 0.4082495, 0.46391618, 0.44701654, 0.4043772, 0.46490332, 0.44515824, 0.40265593, 0.4667201, 0.44454247, 0.40199926, 0.46827796, 0.44361395, 0.40106985, 0.4689617, 0.44204637, 0.39977044, 0.46984524, 0.44108343, 0.39902535, 0.47006163, 0.43934757, 0.39769718, 0.4703922, 0.43820643, 0.3969725, 0.47107235, 0.4376927, 0.39650938, 0.47022298, 0.43624225, 0.39514133, 0.46930453, 0.43529567, 0.39403427, 0.46982712, 0.43576032, 0.39404446, 0.47017223, 0.43618742, 0.39435542, 0.4694003, 0.4360398, 0.39428154, 0.46953455, 0.43755886, 0.3957795, 0.46984398, 0.43946698, 0.3975351, 0.46860978, 0.4398625, 0.39742172, 0.46743664, 0.44045395, 0.39771962, 0.4664153, 0.441422, 0.39881715, 0.46539003, 0.4425812, 0.4001243, 0.46471027, 0.44455647, 0.40233374, 0.4631384, 0.445892, 0.4036109, 0.46269837, 0.4483479, 0.40574276, 0.46464917, 0.45352122, 0.4106488, 0.46913794, 0.46151072, 0.4180183, 0.47612476, 0.47142535, 0.42751458, 0.48562384, 0.48328254, 0.4391903, 0.49065673, 0.48847106, 0.44337398, 0.4762089, 0.47174934, 0.42720953, 0.46992597, 0.46254256, 0.41864532, 0.46587697, 0.455665, 0.41211063, 0.4632615, 0.4503376, 0.40690297, 0.4615228, 0.44599396, 0.40270564, 0.46145, 0.44347313, 0.40029544, 0.46229553, 0.44214556, 0.39903614, 0.4633629, 0.4409681, 0.39787462, 0.46444836, 0.4400592, 0.3970369, 0.46541107, 0.4395973, 0.396772, 0.46545494, 0.4378673, 0.39552382, 
0.46544418, 0.4364894, 0.3945714, 0.46567369, 0.4357634, 0.39408976, 0.46517593, 0.4347266, 0.3931963, 0.4646447, 0.4339637, 0.39231563, 0.46494365, 0.43425128, 0.3921338, 0.4652371, 0.4346358, 0.39233696, 0.46531063, 0.4351833, 0.39275497, 0.4658149, 0.4368641, 0.3942792, 0.46608558, 0.43857804, 0.39584464, 0.46512866, 0.439183, 0.3959698, 0.46426895, 0.43989575, 0.39645782, 0.4629032, 0.4403246, 0.3970551, 0.46206772, 0.44136095, 0.398182, 0.4615198, 0.4430804, 0.40006167, 0.46072856, 0.44482896, 0.4017565, 0.4604609, 0.44716954, 0.40379632, 0.4634732, 0.45312282, 0.40951848, 0.4690215, 0.4616619, 0.4175838, 0.4761963, 0.4715245, 0.42697492, 0.48598465, 0.48345333, 0.43864906, 0.49113593, 0.4888251, 0.44338593, 0.4773457, 0.47286874, 0.4278035, 0.47095615, 0.46379542, 0.41921082, 0.4659525, 0.4563613, 0.41217622, 0.4628005, 0.45087418, 0.40681058, 0.46057633, 0.44652542, 0.40242973, 0.4597378, 0.44349438, 0.39950204, 0.45995352, 0.44172272, 0.39771512, 0.46068993, 0.44055927, 0.39651066, 0.46182066, 0.43993324, 0.39588073, 0.46251452, 0.43930632, 0.39557284, 0.4624306, 0.43774733, 0.39479008, 0.46249196, 0.43669358, 0.3942852, 0.46214354, 0.43576548, 0.3936931, 0.46174237, 0.43486437, 0.39285848, 0.46176487, 0.43439063, 0.39235017, 0.4621464, 0.434697, 0.3922237, 0.4625178, 0.43519288, 0.39243484, 0.4627419, 0.43585622, 0.39291012, 0.4626646, 0.43701527, 0.3939037, 0.4623528, 0.4381942, 0.39481428, 0.46204403, 0.43909293, 0.39528152, 0.46148244, 0.439675, 0.39561906, 0.46060348, 0.4404073, 0.39633393, 0.45987886, 0.4413812, 0.39732507, 0.4594978, 0.442881, 0.39890796, 0.4589982, 0.44455284, 0.4007941, 0.4595094, 0.4473167, 0.40339577, 0.46308458, 0.45341235, 0.40925452, 0.4689954, 0.46212262, 0.41751015, 0.47654805, 0.47209722, 0.42697477, 0.4861872, 0.48372567, 0.43831775, 0.4921391, 0.4896993, 0.44395274, 0.4788669, 0.47454727, 0.42905304, 0.47187188, 0.46527085, 0.42010412, 0.46658456, 0.45792884, 0.41303116, 0.46274406, 0.45198298, 0.40729395, 0.46039143, 0.44771364, 0.40291768, 0.45922396, 0.4445714, 0.3996963, 0.45915, 0.4426975, 0.39780167, 0.4596903, 0.4416545, 0.39663747, 0.46085495, 0.44123846, 0.39630082, 0.46141827, 0.4405595, 0.39607093, 0.46090633, 0.43888047, 0.39515755, 0.4605155, 0.43768987, 0.3945795, 0.4600442, 0.4367675, 0.39394528, 0.45952722, 0.4357155, 0.39303502, 0.4596451, 0.4353589, 0.39268905, 0.4598495, 0.43554977, 0.39261273, 0.45988148, 0.43565464, 0.39257628, 0.4600072, 0.43628863, 0.39290395, 0.46011505, 0.43749446, 0.39376038, 0.45949477, 0.43824497, 0.3941032, 0.45967442, 0.4393703, 0.39476636, 0.45992628, 0.44055504, 0.39560944, 0.46010628, 0.4421044, 0.3970969, 0.4596058, 0.44309497, 0.3981804, 0.4593249, 0.444418, 0.39966536, 0.45893383, 0.44581956, 0.4014791, 0.46055028, 0.44941244, 0.40491262, 0.46427548, 0.45541894, 0.41055405, 0.47028777, 0.46395934, 0.4185919, 0.4776288, 0.47344655, 0.42766345, 0.48678872, 0.48444715, 0.43855804, 0.4929573, 0.49056453, 0.44455546, 0.4801431, 0.47611168, 0.43034315, 0.47352782, 0.4675817, 0.4219302, 0.4678687, 0.46010107, 0.41450202, 0.4633305, 0.4538019, 0.408394, 0.46081543, 0.44945621, 0.40399024, 0.4597075, 0.4466324, 0.40103602, 0.4595097, 0.44488344, 0.39923117, 0.46011227, 0.44398293, 0.39817876, 0.46091866, 0.44328898, 0.3975599, 0.46109486, 0.44242743, 0.39703122, 0.46057677, 0.4410012, 0.39619935, 0.45984402, 0.4395359, 0.395409, 0.4590749, 0.43819544, 0.39447019, 0.4579443, 0.43650135, 0.3930418, 0.4582956, 0.4365312, 0.39307144, 0.4584816, 0.43685985, 0.39310333, 0.458212, 0.43669534, 0.3929341, 
0.4584134, 0.43740058, 0.39341915, 0.45967066, 0.43951488, 0.39503577, 0.4594095, 0.44033423, 0.39531332, 0.45982704, 0.44154286, 0.39601713, 0.46075177, 0.44344717, 0.3976507, 0.46106845, 0.4451757, 0.39926547, 0.46086028, 0.44626087, 0.40052724, 0.46081224, 0.4475952, 0.40210167, 0.46081027, 0.4489939, 0.40386808, 0.46287122, 0.45296448, 0.40782395, 0.46694446, 0.45906132, 0.41354996, 0.47281367, 0.46696797, 0.42114028, 0.47940218, 0.47547796, 0.4293688, 0.48751536, 0.48523042, 0.43914047, 0.49343124, 0.49111083, 0.44498178, 0.4817631, 0.47798476, 0.43183413, 0.47598234, 0.47052667, 0.42439947, 0.47064516, 0.46366754, 0.4174625, 0.46593043, 0.4574782, 0.4113519, 0.46324423, 0.4531464, 0.40693176, 0.46209958, 0.45048827, 0.40416777, 0.46164948, 0.44874325, 0.40228638, 0.46212152, 0.4478831, 0.40128192, 0.4625585, 0.44704524, 0.40048638, 0.46232152, 0.44595855, 0.39955634, 0.46113315, 0.4440692, 0.39819762, 0.46008492, 0.44226795, 0.39717063, 0.4591275, 0.44072285, 0.39616692, 0.45850682, 0.43964285, 0.3952809, 0.45895976, 0.4398014, 0.3953519, 0.45937544, 0.44030246, 0.39567336, 0.45958784, 0.44074363, 0.3959883, 0.45954034, 0.44125912, 0.39633763, 0.46053416, 0.442994, 0.3977384, 0.4609965, 0.44430616, 0.39849144, 0.46180096, 0.44584665, 0.39952838, 0.46289277, 0.44781825, 0.40117937, 0.46352583, 0.44958597, 0.40269294, 0.46326247, 0.450424, 0.4036909, 0.46349007, 0.45189765, 0.4054761, 0.46399578, 0.4535567, 0.4075182, 0.4662247, 0.4575239, 0.41154274, 0.47061172, 0.46359962, 0.41736796, 0.47595087, 0.47060484, 0.42425632, 0.48173562, 0.4780661, 0.43158343, 0.48875228, 0.48646942, 0.44030133, 0.4938999, 0.49154198, 0.44536138, 0.48349097, 0.47992733, 0.43341362, 0.47837743, 0.47339857, 0.42672044, 0.4737485, 0.46747538, 0.42074406, 0.46997207, 0.46242562, 0.41557604, 0.46772632, 0.45874637, 0.41160282, 0.46658495, 0.45625645, 0.40891683, 0.46625254, 0.45501146, 0.40753707, 0.46618026, 0.45388258, 0.4064112, 0.4659845, 0.45264813, 0.40515968, 0.46549815, 0.45138732, 0.40403405, 0.46374315, 0.4489805, 0.40212932, 0.46178335, 0.44635344, 0.4001082, 0.4608888, 0.44491395, 0.39909902, 0.46102262, 0.4446472, 0.39896137, 0.4609971, 0.44441235, 0.398755, 0.46163887, 0.44510064, 0.39930743, 0.462117, 0.4458559, 0.39988503, 0.46218413, 0.4464289, 0.40041336, 0.46288723, 0.44773602, 0.40154725, 0.4638609, 0.44938737, 0.40277442, 0.46534562, 0.45146543, 0.40436718, 0.46712446, 0.45402792, 0.4065498, 0.46761593, 0.45538735, 0.40768346, 0.46748164, 0.45626524, 0.40868077, 0.46797082, 0.45785603, 0.4105124, 0.46918288, 0.46000096, 0.4129843, 0.4710879, 0.46325436, 0.41646507, 0.47502747, 0.46854535, 0.4216847, 0.47912446, 0.47417372, 0.42725676, 0.48401794, 0.48047122, 0.43368766, 0.48983815, 0.48751864, 0.44139686, 0.4950917, 0.49275237, 0.44649878, 0.48569858, 0.48222837, 0.43553528, 0.48191068, 0.4772362, 0.43017155, 0.47817746, 0.4725027, 0.4251798, 0.47538704, 0.46853805, 0.4209152, 0.47404024, 0.46590918, 0.41788188, 0.47315222, 0.4639131, 0.41566697, 0.472728, 0.4626964, 0.41439414, 0.4720675, 0.46129555, 0.41297945, 0.47174, 0.4601865, 0.411877, 0.4709266, 0.45862707, 0.4105563, 0.4690765, 0.45615128, 0.40840393, 0.46730587, 0.45370534, 0.4063889, 0.46650335, 0.45248532, 0.40538746, 0.46628976, 0.45191848, 0.404916, 0.466175, 0.45183614, 0.40495625, 0.46696663, 0.45275542, 0.40568537, 0.46741617, 0.45335218, 0.40624318, 0.46791244, 0.45416242, 0.40707466, 0.46925, 0.4559819, 0.40875977, 0.47052914, 0.45787305, 0.41029495, 0.47192872, 0.45994505, 0.411884, 0.47286353, 0.46166065, 0.41340396, 
0.47371733, 0.4632468, 0.41483882, 0.47427946, 0.46453047, 0.4161221, 0.47448388, 0.46563137, 0.41739118, 0.4755192, 0.46751526, 0.41957146, 0.47705555, 0.46999267, 0.42250055, 0.4797391, 0.47366083, 0.42624933, 0.4828203, 0.47805458, 0.43066132, 0.4863529, 0.48289573, 0.43589237, 0.4911985, 0.4888579, 0.44269702, 0.49628946, 0.49393958, 0.4476208, 0.4885821, 0.48523116, 0.43835247, 0.48598143, 0.48157603, 0.43425906, 0.48337057, 0.47821656, 0.43051922, 0.48189464, 0.47563952, 0.42752433, 0.48108828, 0.47368678, 0.42521375, 0.48029864, 0.4719771, 0.42332226, 0.4802187, 0.4712401, 0.4225035, 0.4800259, 0.47041312, 0.42150456, 0.4799161, 0.469631, 0.42073488, 0.4788826, 0.46793967, 0.41931695, 0.4772575, 0.46569908, 0.4173225, 0.4762208, 0.46396434, 0.4158522, 0.47584766, 0.4632649, 0.41519472, 0.47556406, 0.46283564, 0.4148391, 0.4755079, 0.46289164, 0.4150495, 0.47546157, 0.46305245, 0.4150759, 0.4757245, 0.46347606, 0.41543457, 0.4763831, 0.464361, 0.41632405, 0.47755116, 0.46581525, 0.41775817, 0.47890738, 0.4677856, 0.41955388, 0.47990802, 0.46959606, 0.4209971, 0.48039538, 0.47074136, 0.42207196, 0.4812799, 0.47223872, 0.42346793, 0.48208222, 0.4735089, 0.42465597, 0.48213398, 0.47417128, 0.4254378, 0.48257753, 0.47531426, 0.42693436, 0.48360044, 0.4771128, 0.4291249, 0.48513362, 0.47931355, 0.4314932, 0.48721617, 0.4823682, 0.43478897, 0.48897672, 0.48528183, 0.43823946, 0.49222073, 0.4896482, 0.4435492, 0.49830624, 0.4955962, 0.44960618, 0.49203593, 0.4885651, 0.44193462, 0.491314, 0.48701072, 0.43987766, 0.49003676, 0.48515624, 0.43751255, 0.48953062, 0.48378173, 0.43550095, 0.48947585, 0.48272935, 0.43406305, 0.48907086, 0.4815859, 0.43273798, 0.48951945, 0.48148534, 0.4324123, 0.4898992, 0.48122045, 0.4319242, 0.4898536, 0.4806078, 0.43133497, 0.489178, 0.47933793, 0.4302843, 0.4882152, 0.47784773, 0.42898348, 0.48771352, 0.47686645, 0.4281205, 0.48785704, 0.47663307, 0.42779398, 0.48801175, 0.4767619, 0.42791557, 0.48795113, 0.476827, 0.42813677, 0.48731503, 0.47631937, 0.4275383, 0.48753265, 0.47673947, 0.42799515, 0.48777097, 0.4771585, 0.42849052, 0.48879042, 0.47844476, 0.42974117, 0.48988953, 0.4800059, 0.43115532, 0.49048847, 0.481198, 0.43232682, 0.4910368, 0.48238492, 0.43350604, 0.49149278, 0.4833758, 0.43444526, 0.49191988, 0.48423198, 0.43531227, 0.4915687, 0.48447606, 0.4356281, 0.4910316, 0.48467836, 0.43611318, 0.49121976, 0.48548782, 0.4373631, 0.4917494, 0.48647952, 0.43874055, 0.49212867, 0.48759937, 0.44022134, 0.4924926, 0.4888192, 0.44203192, 0.49410257, 0.49131668, 0.44551373, 0.50092065, 0.49783507, 0.45273027, 0.49646357, 0.49276203, 0.4469484, 0.49751094, 0.4931648, 0.44682762, 0.49757057, 0.49263683, 0.44570816, 0.49840772, 0.4927896, 0.44518217, 0.49932027, 0.49299964, 0.44494417, 0.49973533, 0.49280325, 0.44448593, 0.50017166, 0.4927442, 0.44414347, 0.5006935, 0.4927673, 0.44382164, 0.5009629, 0.4924989, 0.44352773, 0.50100833, 0.49202856, 0.44317222, 0.5008614, 0.4915098, 0.4426759, 0.5007643, 0.4911369, 0.44219226, 0.50109565, 0.49113566, 0.44201556, 0.5016425, 0.4916296, 0.4424733, 0.50157565, 0.49160177, 0.44258052, 0.5008911, 0.49097434, 0.44199085, 0.5010885, 0.49125972, 0.44242918, 0.5013047, 0.49162892, 0.44286737, 0.50176513, 0.49236336, 0.44362044, 0.5024305, 0.49341306, 0.44461703, 0.50267565, 0.49409327, 0.44538188, 0.50250834, 0.4944686, 0.4459162, 0.5019687, 0.49443263, 0.44594905, 0.5015734, 0.49443117, 0.44602332, 0.5010627, 0.49453232, 0.44619176, 0.5000859, 0.49428076, 0.44632775, 0.49915045, 0.4939502, 0.44652918, 0.49828786, 
0.4935386, 0.44658235, 0.49716946, 0.49293026, 0.44639513, 0.49633157, 0.4926185, 0.44669232, 0.49662086, 0.4935319, 0.44855618]) \
.astype(np.float32) \
.reshape((32, 32, 3))
path_train = f"{os.path.dirname(os.path.abspath(__file__))}/southwest_images_new_train.pkl"
path_test = f"{os.path.dirname(os.path.abspath(__file__))}/southwest_images_new_test.pkl"
# with open(f"{os.path.dirname(os.path.abspath(__file__))}/southwest_images_new_test.pkl", 'rb') as train_f:
# saved_southwest_dataset_train = pickle.load(train_f)
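    # CIFAR-10 class 0 corresponds to "airplane"; all Southwest images are given that label here.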
CLASS = 0
x_train = np.load(path_train, allow_pickle=True).astype(np.float32)
x_test = np.load(path_test, allow_pickle=True).astype(np.float32)
y_train = np.repeat(CLASS, x_train.shape[0]).astype(np.uint8)
y_test = np.repeat(CLASS, x_test.shape[0]).astype(np.uint8)
# Normalize
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = x_train - cifar_mean, x_test - cifar_mean
return (x_train, y_train), (x_test, y_test)
| 36,589 | 1,260.724138 | 35,628 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/model_utils.py
|
import json
import numpy as np
import os
from collections import defaultdict
def batch_data(data, batch_size, seed):
'''
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
    yields batches (x, y), where both are numpy arrays of length at most batch_size
'''
data_x = data['x']
data_y = data['y']
# randomly shuffle data
np.random.seed(seed)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i:i+batch_size]
batched_y = data_y[i:i+batch_size]
yield (batched_x, batched_y)
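# Example usage (illustrative only; shapes are made up):
#   client_data = {'x': np.zeros((100, 784)), 'y': np.zeros(100)}
#   for batch_x, batch_y in batch_data(client_data, batch_size=32, seed=0):
#       ...  # each yielded batch holds at most 32 samples, shuffled consistently across x and y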
def read_dir(data_dir):
clients = []
groups = []
data = defaultdict(lambda : None)
files = os.listdir(data_dir)
files = [f for f in files if f.endswith('.json')]
for f in files:
file_path = os.path.join(data_dir,f)
with open(file_path, 'r') as inf:
cdata = json.load(inf)
clients.extend(cdata['users'])
if 'hierarchies' in cdata:
groups.extend(cdata['hierarchies'])
data.update(cdata['user_data'])
clients = list(sorted(data.keys()))
return clients, groups, data
def read_data(train_data_dir, test_data_dir):
'''parses data in given train and test data directories
assumes:
- the data in the input directories are .json files with
keys 'users' and 'user_data'
- the set of train set users is the same as the set of test set users
Return:
clients: list of client ids
groups: list of group ids; empty list if none found
train_data: dictionary of train data
test_data: dictionary of test data
'''
train_clients, train_groups, train_data = read_dir(train_data_dir)
test_clients, test_groups, test_data = read_dir(test_data_dir)
assert train_clients == test_clients
assert train_groups == test_groups
return train_clients, train_groups, train_data, test_data
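# Example usage (directory paths are hypothetical):
#   clients, groups, train_data, test_data = read_data('data/train', 'data/test')
#   x0, y0 = train_data[clients[0]]['x'], train_data[clients[0]]['y']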
| 2,067 | 28.542857 | 78 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/data/leaf/shakespeare/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/data/leaf/shakespeare/preprocess/shake_utils.py
|
'''
helper functions for preprocessing shakespeare data
'''
import json
import os
import re
def __txt_to_data(txt_dir, seq_length=80):
"""Parses text file in given directory into data for next-character model.
Args:
txt_dir: path to text file
seq_length: length of strings in X
"""
raw_text = ""
with open(txt_dir,'r') as inf:
raw_text = inf.read()
raw_text = raw_text.replace('\n', ' ')
raw_text = re.sub(r" *", r' ', raw_text)
dataX = []
dataY = []
for i in range(0, len(raw_text) - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append(seq_in)
dataY.append(seq_out)
return dataX, dataY
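# Worked example (hypothetical input): with seq_length=5 and raw_text "To be or not", the first
# pair is seq_in="To be", seq_out=" ", the second is seq_in="o be ", seq_out="o", and so on.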
def parse_data_in(data_dir, users_and_plays_path, raw=False):
'''
    returns dictionary with keys: users, hierarchies, num_samples, user_data
    raw := bool representing whether to include raw text in all_data
    if raw is True, each user_data entry additionally contains a 'raw' key with the full passage text
removes users with no data
'''
with open(users_and_plays_path, 'r') as inf:
users_and_plays = json.load(inf)
files = os.listdir(data_dir)
users = []
hierarchies = []
num_samples = []
user_data = {}
for f in files:
user = f[:-4]
passage = ''
filename = os.path.join(data_dir, f)
with open(filename, 'r') as inf:
passage = inf.read()
dataX, dataY = __txt_to_data(filename)
if(len(dataX) > 0):
users.append(user)
if raw:
user_data[user] = {'raw': passage}
else:
user_data[user] = {}
user_data[user]['x'] = dataX
user_data[user]['y'] = dataY
hierarchies.append(users_and_plays[user])
num_samples.append(len(dataY))
all_data = {}
all_data['users'] = users
all_data['hierarchies'] = hierarchies
all_data['num_samples'] = num_samples
all_data['user_data'] = user_data
return all_data
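# Example usage (mirrors gen_all_data.py; paths are relative to the Shakespeare data directory):
#   all_data = parse_data_in('data/raw_data/by_play_and_character',
#                            'data/raw_data/users_and_plays.json', raw=False)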
| 2,004 | 28.925373 | 78 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/shakespeare/preprocess/preprocess_shakespeare.py
|
"""Preprocesses the Shakespeare dataset for federated training.
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
To run:
python preprocess_shakespeare.py path/to/raw/shakespeare.txt output_directory/
The raw data can be downloaded from:
http://www.gutenberg.org/cache/epub/100/pg100.txt
(The Plain Text UTF-8 file format, md5sum: 036d0f9cf7296f41165c2e6da1e52a0e)
Note that The Comedy of Errors has an incorrect indentation compared to all the
other plays in the file. The code below reflects that issue. To make the code
cleaner, you could fix the indentation in the raw shakespeare file and remove
the special casing for that play in the code below.
Authors: [email protected], [email protected]
Disclaimer: This is not an official Google product.
"""
import collections
import json
import os
import random
import re
import sys
RANDOM_SEED = 1234
# Regular expressions to capture an actor's name, and line continuation
CHARACTER_RE = re.compile(r'^  ([a-zA-Z][a-zA-Z ]*)\. (.*)')
CONT_RE = re.compile(r'^    (.*)')
# The Comedy of Errors has errors in its indentation so we need to use
# different regular expressions.
COE_CHARACTER_RE = re.compile(r'^([a-zA-Z][a-zA-Z ]*)\. (.*)')
COE_CONT_RE = re.compile(r'^(.*)')
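# In both variants, group(1) of the character regex is the speaker's name and group(2) is the
# first line of the speech; the continuation regexes capture each subsequent line of a speech.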
def _match_character_regex(line, comedy_of_errors=False):
return (COE_CHARACTER_RE.match(line) if comedy_of_errors
else CHARACTER_RE.match(line))
def _match_continuation_regex(line, comedy_of_errors=False):
return (
COE_CONT_RE.match(line) if comedy_of_errors else CONT_RE.match(line))
def _split_into_plays(shakespeare_full):
"""Splits the full data by play."""
# List of tuples (play_name, dict from character to list of lines)
plays = []
discarded_lines = [] # Track discarded lines.
slines = shakespeare_full.splitlines(True)[1:]
# skip contents, the sonnets, and all's well that ends well
author_count = 0
start_i = 0
for i, l in enumerate(slines):
if 'by William Shakespeare' in l:
author_count += 1
if author_count == 2:
start_i = i - 5
break
slines = slines[start_i:]
current_character = None
comedy_of_errors = False
for i, line in enumerate(slines):
# This marks the end of the plays in the file.
if i > 124195 - start_i:
break
# This is a pretty good heuristic for detecting the start of a new play:
if 'by William Shakespeare' in line:
current_character = None
characters = collections.defaultdict(list)
# The title will be 2, 3, 4, 5, 6, or 7 lines above "by William Shakespeare".
if slines[i - 2].strip():
title = slines[i - 2]
elif slines[i - 3].strip():
title = slines[i - 3]
elif slines[i - 4].strip():
title = slines[i - 4]
elif slines[i - 5].strip():
title = slines[i - 5]
elif slines[i - 6].strip():
title = slines[i - 6]
else:
title = slines[i - 7]
title = title.strip()
assert title, (
'Parsing error on line %d. Expecting title 2 or 3 lines above.' %
i)
comedy_of_errors = (title == 'THE COMEDY OF ERRORS')
# Degenerate plays are removed at the end of the method.
plays.append((title, characters))
continue
match = _match_character_regex(line, comedy_of_errors)
if match:
character, snippet = match.group(1), match.group(2)
# Some character names are written with multiple casings, e.g., SIR_Toby
# and SIR_TOBY. To normalize the character names, we uppercase each name.
# Note that this was not done in the original preprocessing and is a
# recent fix.
character = character.upper()
if not (comedy_of_errors and character.startswith('ACT ')):
characters[character].append(snippet)
current_character = character
continue
else:
current_character = None
continue
elif current_character:
match = _match_continuation_regex(line, comedy_of_errors)
if match:
if comedy_of_errors and match.group(1).startswith('<'):
current_character = None
continue
else:
characters[current_character].append(match.group(1))
continue
# Didn't consume the line.
line = line.strip()
if line and i > 2646:
# Before 2646 are the sonnets, which we expect to discard.
discarded_lines.append('%d:%s' % (i, line))
# Remove degenerate "plays".
return [play for play in plays if len(play[1]) > 1], discarded_lines
def _remove_nonalphanumerics(filename):
return re.sub('\\W+', '_', filename)
def play_and_character(play, character):
return _remove_nonalphanumerics((play + '_' + character).replace(' ', '_'))
def _get_train_test_by_character(plays, test_fraction=0.2):
"""
Splits character data into train and test sets.
if test_fraction <= 0, returns {} for all_test_examples
plays := list of (play, dict) tuples where play is a string and dict
is a dictionary with character names as keys
"""
skipped_characters = 0
all_train_examples = collections.defaultdict(list)
all_test_examples = collections.defaultdict(list)
def add_examples(example_dict, example_tuple_list):
for play, character, sound_bite in example_tuple_list:
example_dict[play_and_character(
play, character)].append(sound_bite)
users_and_plays = {}
for play, characters in plays:
curr_characters = list(characters.keys())
for c in curr_characters:
users_and_plays[play_and_character(play, c)] = play
for character, sound_bites in characters.items():
examples = [(play, character, sound_bite)
for sound_bite in sound_bites]
if len(examples) <= 2:
skipped_characters += 1
                # Skip characters with 2 or fewer lines since we need at least one
                # train and one test line.
continue
train_examples = examples
if test_fraction > 0:
num_test = max(int(len(examples) * test_fraction), 1)
train_examples = examples[:-num_test]
test_examples = examples[-num_test:]
assert len(test_examples) == num_test
assert len(train_examples) >= len(test_examples)
add_examples(all_test_examples, test_examples)
add_examples(all_train_examples, train_examples)
return users_and_plays, all_train_examples, all_test_examples
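# Worked example: a character with 10 lines and test_fraction=0.2 gets
# num_test = max(int(10 * 0.2), 1) = 2, i.e. 8 lines for training and the last 2 for testing;
# characters with 2 or fewer lines are skipped entirely.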
def _write_data_by_character(examples, output_directory):
"""Writes a collection of data files by play & character."""
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for character_name, sound_bites in examples.items():
filename = os.path.join(output_directory, character_name + '.txt')
with open(filename, 'w') as output:
for sound_bite in sound_bites:
output.write(sound_bite + '\n')
def main(argv):
print('Splitting .txt data between users')
input_filename = argv[0]
with open(input_filename, 'r') as input_file:
shakespeare_full = input_file.read()
plays, discarded_lines = _split_into_plays(shakespeare_full)
print('Discarded %d lines' % len(discarded_lines))
users_and_plays, all_examples, _ = _get_train_test_by_character(plays, test_fraction=-1.0)
output_directory = argv[1]
with open(os.path.join(output_directory, 'users_and_plays.json'), 'w') as ouf:
json.dump(users_and_plays, ouf)
_write_data_by_character(all_examples,
os.path.join(output_directory,
'by_play_and_character/'))
if __name__ == '__main__':
main(sys.argv[1:])
| 8,701 | 42.079208 | 94 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/shakespeare/preprocess/gen_all_data.py
|
import argparse
import json
import os
from shake_utils import parse_data_in
parser = argparse.ArgumentParser()
parser.add_argument('--raw',
help='include users\' raw .txt data in respective .json files',
action="store_true")
parser.set_defaults(raw=False)
args = parser.parse_args()
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
users_and_plays_path = os.path.join(parent_path, 'data', 'raw_data', 'users_and_plays.json')
txt_dir = os.path.join(parent_path, 'data', 'raw_data', 'by_play_and_character')
json_data = parse_data_in(txt_dir, users_and_plays_path, args.raw)
json_path = os.path.join(parent_path, 'data', 'all_data', 'all_data.json')
with open(json_path, 'w') as outfile:
json.dump(json_data, outfile)
| 786 | 29.269231 | 92 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/shakespeare/preprocess/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/sample.py
|
'''
samples from all raw data;
by default samples in a non-iid manner; namely, randomly selects users from
raw data until their cumulative amount of data exceeds the given number of
datapoints to sample (specified by --fraction argument);
ordering of original data points is not preserved in sampled data
'''
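# Rough behaviour of the non-iid branch below (numbers are hypothetical): with --fraction 0.1 and
# 100,000 samples in all_data.json, users are visited in shuffled order and accumulated until their
# combined samples reach 10,000; the last user taken is truncated so the total fits exactly.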
import argparse
import json
import os
import random
import time
from collections import OrderedDict
from constants import DATASETS, SEED_FILES
from util import iid_divide
parser = argparse.ArgumentParser()
parser.add_argument('--name',
help='name of dataset to parse; default: sent140;',
type=str,
choices=DATASETS,
default='sent140')
parser.add_argument('--iid',
help='sample iid;',
action="store_true")
parser.add_argument('--niid',
help="sample niid;",
dest='iid', action='store_false')
parser.add_argument('--fraction',
help='fraction of all data to sample; default: 0.1;',
type=float,
default=0.1)
parser.add_argument('--u',
help=('number of users in iid data set; ignored in niid case;'
'represented as fraction of original total number of users; '
'default: 0.01;'),
type=float,
default=0.01)
parser.add_argument('--seed',
help='seed for random sampling of data',
type=int,
default=None)
parser.set_defaults(iid=False)
args = parser.parse_args()
print('------------------------------')
print('sampling data')
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(parent_path, args.name, 'data')
subdir = os.path.join(data_dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]
rng_seed = (args.seed if (args.seed is not None and args.seed >= 0) else int(time.time()))
print ("Using seed {}".format(rng_seed))
rng = random.Random(rng_seed)
print (os.environ.get('LEAF_DATA_META_DIR'))
if os.environ.get('LEAF_DATA_META_DIR') is not None:
seed_fname = os.path.join(os.environ.get('LEAF_DATA_META_DIR'), SEED_FILES['sampling'])
with open(seed_fname, 'w+') as f:
f.write("# sampling_seed used by sampling script - supply as "
"--smplseed to preprocess.sh or --seed to utils/sample.py\n")
f.write(str(rng_seed))
print ("- random seed written out to {file}".format(file=seed_fname))
else:
print ("- using random seed '{seed}' for sampling".format(seed=rng_seed))
new_user_count = 0 # for iid case
for f in files:
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
# Load data into an OrderedDict, to prevent ordering changes
# and enable reproducibility
data = json.load(inf, object_pairs_hook=OrderedDict)
num_users = len(data['users'])
tot_num_samples = sum(data['num_samples'])
num_new_samples = int(args.fraction * tot_num_samples)
hierarchies = None
if(args.iid):
raw_list = list(data['user_data'].values())
raw_x = [elem['x'] for elem in raw_list]
raw_y = [elem['y'] for elem in raw_list]
x_list = [item for sublist in raw_x for item in sublist] # flatten raw_x
y_list = [item for sublist in raw_y for item in sublist] # flatten raw_y
num_new_users = int(round(args.u * num_users))
if num_new_users == 0:
num_new_users += 1
indices = [i for i in range(tot_num_samples)]
new_indices = rng.sample(indices, num_new_samples)
users = [str(i+new_user_count) for i in range(num_new_users)]
user_data = {}
for user in users:
user_data[user] = {'x': [], 'y': []}
all_x_samples = [x_list[i] for i in new_indices]
all_y_samples = [y_list[i] for i in new_indices]
x_groups = iid_divide(all_x_samples, num_new_users)
y_groups = iid_divide(all_y_samples, num_new_users)
for i in range(num_new_users):
user_data[users[i]]['x'] = x_groups[i]
user_data[users[i]]['y'] = y_groups[i]
num_samples = [len(user_data[u]['y']) for u in users]
new_user_count += num_new_users
else:
ctot_num_samples = 0
users = data['users']
users_and_hiers = None
if 'hierarchies' in data:
users_and_hiers = list(zip(users, data['hierarchies']))
rng.shuffle(users_and_hiers)
else:
rng.shuffle(users)
user_i = 0
num_samples = []
user_data = {}
if 'hierarchies' in data:
hierarchies = []
while(ctot_num_samples < num_new_samples):
hierarchy = None
if users_and_hiers is not None:
user, hier = users_and_hiers[user_i]
else:
user = users[user_i]
cdata = data['user_data'][user]
cnum_samples = len(data['user_data'][user]['y'])
if (ctot_num_samples + cnum_samples > num_new_samples):
cnum_samples = num_new_samples - ctot_num_samples
indices = [i for i in range(cnum_samples)]
new_indices = rng.sample(indices, cnum_samples)
x = []
y = []
for i in new_indices:
x.append(data['user_data'][user]['x'][i])
y.append(data['user_data'][user]['y'][i])
cdata = {'x': x, 'y': y}
if 'hierarchies' in data:
hierarchies.append(hier)
num_samples.append(cnum_samples)
user_data[user] = cdata
ctot_num_samples += cnum_samples
user_i += 1
if 'hierarchies' in data:
users = [u for u, h in users_and_hiers][:user_i]
else:
users = users[:user_i]
# ------------
# create .json file
all_data = {}
all_data['users'] = users
if hierarchies is not None:
all_data['hierarchies'] = hierarchies
all_data['num_samples'] = num_samples
all_data['user_data'] = user_data
slabel = ''
if(args.iid):
slabel = 'iid'
else:
slabel = 'niid'
arg_frac = str(args.fraction)
arg_frac = arg_frac[2:]
arg_nu = str(args.u)
arg_nu = arg_nu[2:]
arg_label = arg_frac
if(args.iid):
arg_label = '%s_%s' % (arg_nu, arg_label)
file_name = '%s_%s_%s.json' % ((f[:-5]), slabel, arg_label)
ouf_dir = os.path.join(data_dir, 'sampled_data', file_name)
print('writing %s' % file_name)
with open(ouf_dir, 'w') as outfile:
json.dump(all_data, outfile)
| 6,761 | 32.475248 | 91 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/split_data.py
|
'''
splits data into train and test sets
'''
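# Typical invocation (sketch; normally driven by preprocess.sh, which forwards
# --spltseed as --seed to this script):
#   python utils/split_data.py --name femnist --by_sample --frac 0.9 --seed 42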
import argparse
import json
import os
import random
import time
import sys
from collections import OrderedDict
from constants import DATASETS, SEED_FILES
def create_jsons_for(user_files, which_set, max_users, include_hierarchy):
"""used in split-by-user case"""
user_count = 0
json_index = 0
users = []
num_samples = []
user_data = {}
prev_dir = None
for (i, t) in enumerate(user_files):
(u, ns, f) = t
file_dir = os.path.join(subdir, f)
if prev_dir != file_dir:
with open(file_dir, "r") as inf:
data = json.load(inf)
prev_dir = file_dir
users.append(u)
num_samples.append(ns)
user_data[u] = data['user_data'][u]
user_count += 1
if (user_count == max_users) or (i == len(user_files) - 1):
all_data = {}
all_data['users'] = users
all_data['num_samples'] = num_samples
all_data['user_data'] = user_data
data_i = f.find('data')
num_i = data_i + 5
num_to_end = f[num_i:]
param_i = num_to_end.find('_')
param_to_end = '.json'
if param_i != -1:
param_to_end = num_to_end[param_i:]
nf = '%s_%d%s' % (f[:(num_i-1)], json_index, param_to_end)
file_name = '%s_%s_%s.json' % ((nf[:-5]), which_set, arg_label)
ouf_dir = os.path.join(dir, which_set, file_name)
print('writing %s' % file_name)
with open(ouf_dir, 'w') as outfile:
json.dump(all_data, outfile)
user_count = 0
json_index += 1
users = []
num_samples = []
user_data = {}
parser = argparse.ArgumentParser()
parser.add_argument('--name',
help='name of dataset to parse; default: sent140;',
type=str,
choices=DATASETS,
default='sent140')
parser.add_argument('--by_user',
help='divide users into training and test set groups;',
dest='user', action='store_true')
parser.add_argument('--by_sample',
help="divide each user's samples into training and test set groups;",
dest='user', action='store_false')
parser.add_argument('--frac',
help='fraction in training set; default: 0.9;',
type=float,
default=0.9)
parser.add_argument('--seed',
help='seed for random partitioning of test/train data',
type=int,
default=None)
parser.set_defaults(user=False)
args = parser.parse_args()
print('------------------------------')
print('generating training and test sets')
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dir = os.path.join(parent_path, args.name, 'data')
subdir = os.path.join(dir, 'rem_user_data')
files = []
if os.path.exists(subdir):
files = os.listdir(subdir)
if len(files) == 0:
subdir = os.path.join(dir, 'sampled_data')
if os.path.exists(subdir):
files = os.listdir(subdir)
if len(files) == 0:
subdir = os.path.join(dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]
rng_seed = (args.seed if (args.seed is not None and args.seed >= 0) else int(time.time()))
rng = random.Random(rng_seed)
if os.environ.get('LEAF_DATA_META_DIR') is not None:
seed_fname = os.path.join(os.environ.get('LEAF_DATA_META_DIR'), SEED_FILES['split'])
with open(seed_fname, 'w+') as f:
f.write("# split_seed used by sampling script - supply as "
"--spltseed to preprocess.sh or --seed to utils/split_data.py\n")
f.write(str(rng_seed))
print ("- random seed written out to {file}".format(file=seed_fname))
else:
print ("- using random seed '{seed}' for sampling".format(seed=rng_seed))
arg_label = str(args.frac)
arg_label = arg_label[2:]
# check if data contains information on hierarchies
file_dir = os.path.join(subdir, files[0])
with open(file_dir, 'r') as inf:
data = json.load(inf)
include_hierarchy = 'hierarchies' in data
if (args.user):
print('splitting data by user')
# 1 pass through all the json files to instantiate arr
# containing all possible (user, .json file name) tuples
user_files = []
for f in files:
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
# Load data into an OrderedDict, to prevent ordering changes
# and enable reproducibility
data = json.load(inf, object_pairs_hook=OrderedDict)
user_files.extend([(u, ns, f) for (u, ns) in
zip(data['users'], data['num_samples'])])
# randomly sample from user_files to pick training set users
num_users = len(user_files)
num_train_users = int(args.frac * num_users)
indices = [i for i in range(num_users)]
train_indices = rng.sample(indices, num_train_users)
train_blist = [False for i in range(num_users)]
for i in train_indices:
train_blist[i] = True
train_user_files = []
test_user_files = []
for i in range(num_users):
if (train_blist[i]):
train_user_files.append(user_files[i])
else:
test_user_files.append(user_files[i])
max_users = sys.maxsize
if args.name == 'femnist':
max_users = 50 # max number of users per json file
create_jsons_for(train_user_files, 'train', max_users, include_hierarchy)
create_jsons_for(test_user_files, 'test', max_users, include_hierarchy)
else:
print('splitting data by sample')
for f in files:
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
# Load data into an OrderedDict, to prevent ordering changes
# and enable reproducibility
data = json.load(inf, object_pairs_hook=OrderedDict)
num_samples_train = []
user_data_train = {}
num_samples_test = []
user_data_test = {}
user_indices = [] # indices of users in data['users'] that are not deleted
removed = 0
for i, u in enumerate(data['users']):
curr_num_samples = len(data['user_data'][u]['y'])
if curr_num_samples >= 2:
# ensures number of train and test samples both >= 1
num_train_samples = max(1, int(args.frac * curr_num_samples))
if curr_num_samples == 2:
num_train_samples = 1
num_test_samples = curr_num_samples - num_train_samples
indices = [j for j in range(curr_num_samples)]
if args.name in ['shakespeare']:
train_indices = [i for i in range(num_train_samples)]
test_indices = [i for i in range(num_train_samples + 80 - 1, curr_num_samples)]
else:
train_indices = rng.sample(indices, num_train_samples)
test_indices = [i for i in range(curr_num_samples) if i not in train_indices]
if len(train_indices) >= 1 and len(test_indices) >= 1:
user_indices.append(i)
num_samples_train.append(num_train_samples)
num_samples_test.append(num_test_samples)
user_data_train[u] = {'x': [], 'y': []}
user_data_test[u] = {'x': [], 'y': []}
train_blist = [False for _ in range(curr_num_samples)]
test_blist = [False for _ in range(curr_num_samples)]
for j in train_indices:
train_blist[j] = True
for j in test_indices:
test_blist[j] = True
for j in range(curr_num_samples):
if (train_blist[j]):
user_data_train[u]['x'].append(data['user_data'][u]['x'][j])
user_data_train[u]['y'].append(data['user_data'][u]['y'][j])
elif (test_blist[j]):
user_data_test[u]['x'].append(data['user_data'][u]['x'][j])
user_data_test[u]['y'].append(data['user_data'][u]['y'][j])
users = [data['users'][i] for i in user_indices]
all_data_train = {}
all_data_train['users'] = users
all_data_train['num_samples'] = num_samples_train
all_data_train['user_data'] = user_data_train
all_data_test = {}
all_data_test['users'] = users
all_data_test['num_samples'] = num_samples_test
all_data_test['user_data'] = user_data_test
file_name_train = '%s_train_%s.json' % ((f[:-5]), arg_label)
file_name_test = '%s_test_%s.json' % ((f[:-5]), arg_label)
ouf_dir_train = os.path.join(dir, 'train', file_name_train)
ouf_dir_test = os.path.join(dir, 'test', file_name_test)
print('writing %s' % file_name_train)
with open(ouf_dir_train, 'w') as outfile:
json.dump(all_data_train, outfile)
print('writing %s' % file_name_test)
with open(ouf_dir_test, 'w') as outfile:
json.dump(all_data_test, outfile)
| 9,200 | 35.951807 | 99 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/constants.py
|
DATASETS = ['sent140', 'femnist', 'shakespeare', 'celeba', 'synthetic']
SEED_FILES = { 'sampling': 'sampling_seed.txt', 'split': 'split_seed.txt' }
| 148 | 48.666667 | 75 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/stats.py
|
'''
assumes that the user has already generated .json file(s) containing data
'''
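# Typical invocation (sketch): python utils/stats.py --name femnist
# Prints per-user sample statistics and writes two histogram PNGs into the
# dataset's data directory.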
import argparse
import json
import matplotlib.pyplot as plt
import math
import numpy as np
import os
from scipy import io
from scipy import stats
from constants import DATASETS
parser = argparse.ArgumentParser()
parser.add_argument('--name',
help='name of dataset to parse; default: sent140;',
type=str,
choices=DATASETS,
default='sent140')
args = parser.parse_args()
def load_data(name):
users = []
num_samples = []
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(parent_path, name, 'data')
subdir = os.path.join(data_dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]
for f in files:
file_dir = os.path.join(subdir, f)
with open(file_dir) as inf:
data = json.load(inf)
users.extend(data['users'])
num_samples.extend(data['num_samples'])
return users, num_samples
def print_dataset_stats(name):
users, num_samples = load_data(name)
num_users = len(users)
print('####################################')
print('DATASET: %s' % name)
print('%d users' % num_users)
print('%d samples (total)' % np.sum(num_samples))
print('%.2f samples per user (mean)' % np.mean(num_samples))
print('num_samples (std): %.2f' % np.std(num_samples))
print('num_samples (std/mean): %.2f' % (np.std(num_samples)/np.mean(num_samples)))
print('num_samples (skewness): %.2f' % stats.skew(num_samples))
bins = [0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200]
if args.name == 'shakespeare':
bins = [0, 2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000, 18000, 20000]
if args.name == 'femnist':
bins = [0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360, 380, 400, 420, 440, 460, 480, 500]
if args.name == 'celeba':
bins = [2 * i for i in range(20)]
if args.name == 'sent140':
bins = [i for i in range(16)]
hist, edges = np.histogram(num_samples, bins=bins)
for e, h in zip(edges, hist):
print(e, "\t", h)
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(parent_path, name, 'data')
plt.hist(num_samples, bins = bins)
fig_name = "%s_hist_nolabel.png" % name
fig_dir = os.path.join(data_dir, fig_name)
plt.savefig(fig_dir)
plt.title(name)
plt.xlabel('number of samples')
plt.ylabel("number of users")
fig_name = "%s_hist.png" % name
fig_dir = os.path.join(data_dir, fig_name)
plt.savefig(fig_dir)
print_dataset_stats(args.name)
| 2,786 | 28.967742 | 139 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/remove_users.py
|
'''
removes users with less than the given number of samples
'''
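# Typical invocation (sketch): python utils/remove_users.py --name femnist --min_samples 10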
import argparse
import json
import os
from constants import DATASETS
parser = argparse.ArgumentParser()
parser.add_argument('--name',
help='name of dataset to parse; default: sent140;',
type=str,
choices=DATASETS,
default='sent140')
parser.add_argument('--min_samples',
help='users with less than x samples are discarded; default: 10;',
type=int,
default=10)
args = parser.parse_args()
print('------------------------------')
print('removing users with less than %d samples' % args.min_samples)
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dir = os.path.join(parent_path, args.name, 'data')
subdir = os.path.join(dir, 'sampled_data')
files = []
if os.path.exists(subdir):
files = os.listdir(subdir)
if len(files) == 0:
subdir = os.path.join(dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]
for f in files:
users = []
hierarchies = []
num_samples = []
user_data = {}
file_dir = os.path.join(subdir, f)
with open(file_dir, 'r') as inf:
data = json.load(inf)
num_users = len(data['users'])
for i in range(num_users):
curr_user = data['users'][i]
curr_hierarchy = None
if 'hierarchies' in data:
curr_hierarchy = data['hierarchies'][i]
curr_num_samples = data['num_samples'][i]
if (curr_num_samples >= args.min_samples):
user_data[curr_user] = data['user_data'][curr_user]
users.append(curr_user)
if curr_hierarchy is not None:
hierarchies.append(curr_hierarchy)
num_samples.append(data['num_samples'][i])
all_data = {}
all_data['users'] = users
if len(hierarchies) == len(users):
all_data['hierarchies'] = hierarchies
all_data['num_samples'] = num_samples
all_data['user_data'] = user_data
file_name = '%s_keep_%d.json' % ((f[:-5]), args.min_samples)
ouf_dir = os.path.join(dir, 'rem_user_data', file_name)
print('writing %s' % file_name)
with open(ouf_dir, 'w') as outfile:
json.dump(all_data, outfile)
| 2,288 | 27.974684 | 82 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/util.py
|
import pickle
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def iid_divide(l, g):
'''
divide list l among g groups
each group has either int(len(l)/g) or int(len(l)/g)+1 elements
returns a list of groups
'''
num_elems = len(l)
group_size = int(len(l)/g)
num_big_groups = num_elems - g * group_size
num_small_groups = g - num_big_groups
glist = []
for i in range(num_small_groups):
glist.append(l[group_size * i : group_size * (i + 1)])
bi = group_size*num_small_groups
group_size += 1
for i in range(num_big_groups):
glist.append(l[bi + group_size * i:bi + group_size * (i + 1)])
return glist
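# Minimal usage sketch of iid_divide (self-contained; nothing here is imported
# by the other scripts):
if __name__ == '__main__':
    # 10 elements split into 3 groups -> two groups of 3 and one group of 4
    groups = iid_divide(list(range(10)), 3)
    print([len(g) for g in groups])  # [3, 3, 4]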
| 839 | 25.25 | 70 |
py
|
fl-analysis
|
fl-analysis-master/src/data/leaf/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/backdoor/edge_case_attack.py
|
import numpy as np
import src.data.ardis as ardis
import src.data.southwest as southwest
class EdgeCaseAttack:
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
"""Loads training and test set"""
raise NotImplementedError("Do not instantiate superclass")
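# Usage sketch (hedged; the exact integration lives in the training code elsewhere
# in this repo): each subclass returns (poisoned train), (poisoned test) and the
# original labels, which a malicious client can mix into its local data, e.g.
#   (x_mal, y_mal), (x_eval, y_eval), (orig_y_train, orig_y_test) = EuropeanSevenEdgeCase().load()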
class NorthWesternEdgeCase(EdgeCaseAttack):
"""Edge case for northwestern airlines planes, CIFAR-10, 32x32"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, _), (x_test, _) = southwest.load_data()
y_train = np.repeat(self._classify_as_label(), x_train.shape[0]).astype(np.uint8)
y_test = np.repeat(self._classify_as_label(), x_test.shape[0]).astype(np.uint8)
orig_y_train, orig_y_test = np.repeat(self._original_label(), x_train.shape[0]).astype(np.uint8), \
np.repeat(self._original_label(), x_test.shape[0]).astype(np.uint8),
return (x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test)
def _classify_as_label(self):
return 9
def _original_label(self):
return 0
class EuropeanSevenEdgeCase(EdgeCaseAttack):
""" Loads european writing style of 7 (from ARDIS dataset) """
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, _), (x_test, _) = ardis.load_data()
y_train = np.repeat(self._classify_as_label(), x_train.shape[0]).astype(np.uint8)
y_test = np.repeat(self._classify_as_label(), x_test.shape[0]).astype(np.uint8)
orig_y_train, orig_y_test = np.repeat(self._original_label(), x_train.shape[0]).astype(np.uint8), \
np.repeat(self._original_label(), x_test.shape[0]).astype(np.uint8),
return (x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test)
def _classify_as_label(self):
return 1
def _original_label(self):
return 7
class EuropeanSevenPixelPatternEdgeCase(EdgeCaseAttack):
""" Loads european writing style of 7 (from ARDIS dataset), and adds pixel pattern """
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, _), (x_test, _) = ardis.load_data()
y_train = np.repeat(self._classify_as_label(), x_train.shape[0]).astype(np.uint8)
y_test = np.repeat(self._classify_as_label(), x_test.shape[0]).astype(np.uint8)
orig_y_train, orig_y_test = np.repeat(self._original_label(), x_train.shape[0]).astype(np.uint8), \
np.repeat(self._original_label(), x_test.shape[0]).astype(np.uint8),
# add pixel pattern
def pixel_pattern(images):
triggersize = 4
position = 10
trigger = np.ones((images.shape[0], triggersize, triggersize, images.shape[-1]))
images[:, position:(triggersize + position), position:(triggersize + position), :] = trigger
return images
x_train = pixel_pattern(x_train)
x_test = pixel_pattern(x_test)
return (x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test)
def _classify_as_label(self):
return 1
def _original_label(self):
return 7
class EuropeanSevenBaselineEdgeCase(EuropeanSevenEdgeCase):
""" Loads european writing style of 7 (from ARDIS dataset).
Baseline version, see how many 7s already classify as 7
"""
def _classify_as_label(self):
return 7
class EuropeanSevenCorrectlyClassifiedOnly(EuropeanSevenEdgeCase):
""" Loads european writing style of 7 (from ARDIS dataset).
Does this attack work as well for numbers that are naturally 7s ?
"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test) = super(EuropeanSevenCorrectlyClassifiedOnly, self).load()
correctly_classified_indices_train = [2, 5, 6, 8, 16, 17, 20, 21, 24, 26, 27, 28, 29, 30, 32, 33, 34, 35, 40, 44, 47, 49, 50, 52, 55, 58, 59, 61, 64, 65, 67, 68, 69, 70, 71, 72, 75, 76, 79, 81, 82, 85, 89, 90, 95, 97, 98, 99, 103, 109, 110, 113, 119, 129, 130, 131, 138, 139, 141, 142, 143, 147, 148, 149, 151, 153, 154, 156, 157, 158, 159, 160, 161, 163, 164, 167, 187, 201, 206, 213, 216, 217, 219, 220, 225, 227, 228, 229, 237, 241, 255, 257, 260, 261, 268, 269, 271, 274, 279, 286, 291, 296, 309, 312, 330, 334, 339, 342, 345, 347, 348, 349, 350, 351, 354, 357, 362, 365, 366, 368, 374, 375, 377, 378, 379, 380, 382, 383, 385, 394, 395, 400, 404, 405, 411, 420, 422, 424, 425, 427, 428, 431, 441, 448, 453, 456, 459, 461, 462, 463, 464, 465, 469, 474, 481, 482, 484, 492, 497, 498, 503, 504, 507, 512, 519, 521, 523, 524, 526, 528, 530, 531, 535, 536, 543, 551, 553, 554, 555, 561, 575, 582, 585, 589, 592, 593, 600, 604, 613, 616, 621, 622, 628, 630, 632, 635, 639, 640, 647, 649, 653, 659]
correctly_classified_indices_test = [1, 3, 13, 19, 21, 24, 25, 28, 30, 35, 43, 45, 46, 54, 56, 58, 62, 75, 78, 79, 82, 84, 89, 97]
return (x_train[correctly_classified_indices_train], y_train[correctly_classified_indices_train]), \
(x_test, y_test), (orig_y_train, orig_y_test)
class EuropeanSevenCorrectlyClassifiedOnlyRandomized(EuropeanSevenEdgeCase):
""" Loads european writing style of 7 (from ARDIS dataset).
Does this attack work as well for numbers that are naturally 7s ?
"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test) = super(EuropeanSevenCorrectlyClassifiedOnlyRandomized, self).load()
correctly_classified_indices_train = [2, 5, 6, 8, 16, 17, 20, 21, 24, 26, 27, 28, 29, 30, 32, 33, 34, 35, 40, 44, 47, 49, 50, 52, 55, 58, 59, 61, 64, 65, 67, 68, 69, 70, 71, 72, 75, 76, 79, 81, 82, 85, 89, 90, 95, 97, 98, 99, 103, 109, 110, 113, 119, 129, 130, 131, 138, 139, 141, 142, 143, 147, 148, 149, 151, 153, 154, 156, 157, 158, 159, 160, 161, 163, 164, 167, 187, 201, 206, 213, 216, 217, 219, 220, 225, 227, 228, 229, 237, 241, 255, 257, 260, 261, 268, 269, 271, 274, 279, 286, 291, 296, 309, 312, 330, 334, 339, 342, 345, 347, 348, 349, 350, 351, 354, 357, 362, 365, 366, 368, 374, 375, 377, 378, 379, 380, 382, 383, 385, 394, 395, 400, 404, 405, 411, 420, 422, 424, 425, 427, 428, 431, 441, 448, 453, 456, 459, 461, 462, 463, 464, 465, 469, 474, 481, 482, 484, 492, 497, 498, 503, 504, 507, 512, 519, 521, 523, 524, 526, 528, 530, 531, 535, 536, 543, 551, 553, 554, 555, 561, 575, 582, 585, 589, 592, 593, 600, 604, 613, 616, 621, 622, 628, 630, 632, 635, 639, 640, 647, 649, 653, 659]
correctly_classified_indices_test = [1, 3, 13, 19, 21, 24, 25, 28, 30, 35, 43, 45, 46, 54, 56, 58, 62, 75, 78, 79, 82, 84, 89, 97]
correctly_classified_indices_train = np.random.choice(x_train.shape[0], len(correctly_classified_indices_train), replace=False)
correctly_classified_indices_test = np.random.choice(x_test.shape[0], len(correctly_classified_indices_test), replace=False)
return (x_train[correctly_classified_indices_train], y_train[correctly_classified_indices_train]), \
(x_test, y_test), (orig_y_train, orig_y_test)
class EuropeanSevenValidaitonOriginalSevenOnly(EuropeanSevenEdgeCase):
""" Loads european writing style of 7 (from ARDIS dataset).
Does this attack work as well for numbers that are naturally 7s ?
"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
(x_train, y_train), (x_test, y_test), (orig_y_train, orig_y_test) = super(EuropeanSevenValidaitonOriginalSevenOnly, self).load()
correctly_classified_indices_train = [2, 5, 6, 8, 16, 17, 20, 21, 24, 26, 27, 28, 29, 30, 32, 33, 34, 35, 40, 44, 47, 49, 50, 52, 55, 58, 59, 61, 64, 65, 67, 68, 69, 70, 71, 72, 75, 76, 79, 81, 82, 85, 89, 90, 95, 97, 98, 99, 103, 109, 110, 113, 119, 129, 130, 131, 138, 139, 141, 142, 143, 147, 148, 149, 151, 153, 154, 156, 157, 158, 159, 160, 161, 163, 164, 167, 187, 201, 206, 213, 216, 217, 219, 220, 225, 227, 228, 229, 237, 241, 255, 257, 260, 261, 268, 269, 271, 274, 279, 286, 291, 296, 309, 312, 330, 334, 339, 342, 345, 347, 348, 349, 350, 351, 354, 357, 362, 365, 366, 368, 374, 375, 377, 378, 379, 380, 382, 383, 385, 394, 395, 400, 404, 405, 411, 420, 422, 424, 425, 427, 428, 431, 441, 448, 453, 456, 459, 461, 462, 463, 464, 465, 469, 474, 481, 482, 484, 492, 497, 498, 503, 504, 507, 512, 519, 521, 523, 524, 526, 528, 530, 531, 535, 536, 543, 551, 553, 554, 555, 561, 575, 582, 585, 589, 592, 593, 600, 604, 613, 616, 621, 622, 628, 630, 632, 635, 639, 640, 647, 649, 653, 659]
correctly_classified_indices_test = [1, 3, 13, 19, 21, 24, 25, 28, 30, 35, 43, 45, 46, 54, 56, 58, 62, 75, 78, 79, 82, 84, 89, 97]
return (x_train, y_train), \
(x_test[correctly_classified_indices_test], y_test[correctly_classified_indices_test]), \
(orig_y_train, orig_y_test)
class CifarRandomNoiseEdgeCase(EdgeCaseAttack):
"""Random noise, CIFAR-10, 32x32"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
num_images = 196 # Same as airline test set
x = np.random.normal(0.0, 1.0, (num_images, 32, 32, 3)).astype(np.float32)
x = np.clip(x, -1.0, 1.0)
y = np.repeat(self._classify_as_label(), num_images).astype(np.uint8)
return (x, y), (x, y), (None, None)
def _classify_as_label(self):
return 2
class FEMNISTRandomNoiseEdgeCase(EdgeCaseAttack):
"""Random noise, CIFAR-10, 32x32"""
def load(self) -> ((np.ndarray, np.ndarray), (np.ndarray, np.ndarray), (np.ndarray, np.ndarray)):
        num_images = 660  # Same size as the ARDIS 7s set used above
x = np.random.normal(0.0, 1.0, (num_images, 28, 28, 1)).astype(np.float32)
x = np.clip(x, -1.0, 1.0)
y = np.repeat(self._classify_as_label(), num_images).astype(np.uint8)
return (x, y), (x, y), (None, None)
def _classify_as_label(self):
return 2
| 10,118 | 62.641509 | 1,003 |
py
|
fl-analysis
|
fl-analysis-master/src/backdoor/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/model/resnet.py
|
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
import numpy as np
import os
# # Training parameters
# batch_size = 32 # orig paper trained all networks with batch_size=128
# epochs = 200
# data_augmentation = True
# num_classes = 10
#
# # Subtracting pixel mean improves accuracy
# subtract_pixel_mean = True
#
# # Model parameter
# # ----------------------------------------------------------------------------
# # | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# # Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# # |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# # ----------------------------------------------------------------------------
# # ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# # ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# # ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# # ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# # ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# # ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# # ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# # ---------------------------------------------------------------------------
# n = 3
#
# # Model version
# # Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
# version = 1
#
# # Computed depth from supplied model parameter n
# if version == 1:
# depth = n * 6 + 2
# elif version == 2:
# depth = n * 9 + 2
#
# # Model name, depth and version
# model_type = 'ResNet%dv%d' % (depth, version)
#
# # Load the CIFAR10 data.
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
#
# # Input image dimensions.
# input_shape = x_train.shape[1:]
#
# # Normalize data.
# x_train = x_train.astype('float32') / 255
# x_test = x_test.astype('float32') / 255
#
# # If subtract pixel mean is enabled
# if subtract_pixel_mean:
# x_train_mean = np.mean(x_train, axis=0)
# x_train -= x_train_mean
# x_test -= x_train_mean
#
# print('x_train shape:', x_train.shape)
# print(x_train.shape[0], 'train samples')
# print(x_test.shape[0], 'test samples')
# print('y_train shape:', y_train.shape)
#
# # Convert class vectors to binary class matrices.
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test = keras.utils.to_categorical(y_test, num_classes)
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
        epoch (int): current epoch
# Returns
lr (float32): learning rate
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
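# Worked example of the schedule above (base learning rate 1e-3):
#   epochs   0-80  -> 1e-3
#   epochs  81-120 -> 1e-4
#   epochs 121-160 -> 1e-5
#   epochs 161-180 -> 1e-6
#   epochs   181+  -> 5e-7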
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True,
name=None):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4),
name=name)
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
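# Ordering note for resnet_layer: with conv_first=True the block is
# Conv2D -> BN -> ReLU; with conv_first=False it is BN -> ReLU -> Conv2D,
# the pre-activation form used by resnet_v2 below.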
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and the
    same feature map sizes.
    Feature map sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs, num_filters = num_filters)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides,
name=f"Conv2D_stack{stack}_res{res_block}_l0")
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None,
name=f"Conv2D_stack{stack}_res{res_block}_l1")
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False,
name=f"Conv2D_stack{stack}_res{res_block}_l2")
x = tf.keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
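# Usage sketch (assumes CIFAR-10-shaped inputs; the compile settings mirror the
# commented-out example at the bottom of this file, not this repo's training loop):
#   model = resnet_v1(input_shape=(32, 32, 3), depth=20)  # depth = 6n+2 with n=3
#   model.compile(loss='categorical_crossentropy',
#                 optimizer=Adam(learning_rate=lr_schedule(0)),
#                 metrics=['accuracy'])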
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and the
    same filter map sizes.
    Feature map sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 32
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = tf.keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='glorot_uniform')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
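# Usage sketch: resnet_v2 requires depth = 9n+2, e.g.
#   model = resnet_v2(input_shape=(32, 32, 3), depth=29)  # n = 3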
#
# if version == 2:
# model = resnet_v2(input_shape=input_shape, depth=depth)
# else:
# model = resnet_v1(input_shape=input_shape, depth=depth)
#
# model.compile(loss='categorical_crossentropy',
# optimizer=Adam(learning_rate=lr_schedule(0)),
# metrics=['accuracy'])
# model.summary()
# print(model_type)
#
# # Prepare model model saving directory.
# save_dir = os.path.join(os.getcwd(), 'saved_models')
# model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
# if not os.path.isdir(save_dir):
# os.makedirs(save_dir)
# filepath = os.path.join(save_dir, model_name)
#
# # Prepare callbacks for model saving and for learning rate adjustment.
# checkpoint = ModelCheckpoint(filepath=filepath,
# monitor='val_acc',
# verbose=1,
# save_best_only=True)
#
# lr_scheduler = LearningRateScheduler(lr_schedule)
#
# lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
# cooldown=0,
# patience=5,
# min_lr=0.5e-6)
#
# callbacks = [checkpoint, lr_reducer, lr_scheduler]
#
# # Run training, with or without data augmentation.
# if not data_augmentation:
# print('Not using data augmentation.')
# model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# validation_data=(x_test, y_test),
# shuffle=True,
# callbacks=callbacks)
# else:
# print('Using real-time data augmentation.')
# # This will do preprocessing and realtime data augmentation:
# datagen = ImageDataGenerator(
# # set input mean to 0 over the dataset
# featurewise_center=False,
# # set each sample mean to 0
# samplewise_center=False,
# # divide inputs by std of dataset
# featurewise_std_normalization=False,
# # divide each input by its std
# samplewise_std_normalization=False,
# # apply ZCA whitening
# zca_whitening=False,
# # epsilon for ZCA whitening
# zca_epsilon=1e-06,
# # randomly rotate images in the range (deg 0 to 180)
# rotation_range=0,
# # randomly shift images horizontally
# width_shift_range=0.1,
# # randomly shift images vertically
# height_shift_range=0.1,
# # set range for random shear
# shear_range=0.,
# # set range for random zoom
# zoom_range=0.,
# # set range for random channel shifts
# channel_shift_range=0.,
# # set mode for filling points outside the input boundaries
# fill_mode='nearest',
# # value used for fill_mode = "constant"
# cval=0.,
# # randomly flip images
# horizontal_flip=True,
# # randomly flip images
# vertical_flip=False,
# # set rescaling factor (applied before any other transformation)
# rescale=None,
# # set function that will be applied on each input
# preprocessing_function=None,
# # image data format, either "channels_first" or "channels_last"
# data_format=None,
# # fraction of images reserved for validation (strictly between 0 and 1)
# validation_split=0.0)
#
# # Compute quantities required for featurewise normalization
# # (std, mean, and principal components if ZCA whitening is applied).
# datagen.fit(x_train)
#
# # Fit the model on the batches generated by datagen.flow().
# model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
# validation_data=(x_test, y_test),
# epochs=epochs, verbose=1, workers=4,
# callbacks=callbacks)
| 15,635 | 36.317422 | 81 |
py
|
fl-analysis
|
fl-analysis-master/src/model/modelc.py
|
import tensorflow.keras as keras
from tensorflow.keras.regularizers import l2
from tensorflow.keras import layers
def build_modelc(l2_reg):
do = 0.2
model = keras.Sequential()
# model.add(layers.Dropout(0.2, noise_shape=(32, 32, 3)))
model.add(layers.Conv2D(filters=96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), input_shape=(32, 32, 3)))
model.add(layers.Conv2D(filters=96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.Conv2D(filters=96, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add(layers.Dropout(0.5))
model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.Conv2D(filters=192, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add(layers.Dropout(0.5))
model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.Conv2D(filters=192, kernel_size=1, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.Conv2D(filters=10, kernel_size=1, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dense(units=10, activation='softmax'))
return model
| 2,230 | 73.366667 | 218 |
py
|
fl-analysis
|
fl-analysis-master/src/model/lenet.py
|
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
def build_lenet5(input_shape=(32, 32, 3), l2_reg=None):
do = 0.0
regularizer = l2(l2_reg) if l2_reg is not None else None
model = keras.Sequential()
model.add(layers.Conv2D(filters=6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer, input_shape=input_shape))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(filters=16, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dropout(do))
model.add(layers.Dense(units=120, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
model.add(layers.Dropout(do))
model.add(layers.Dense(units=84, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
model.add(layers.Dropout(do))
model.add(layers.Dense(units=10, activation='softmax'))
return model
| 1,288 | 46.740741 | 220 |
py
|
fl-analysis
|
fl-analysis-master/src/model/__init__.py
| 0 | 0 | 0 |
py
|
|
fl-analysis
|
fl-analysis-master/src/model/test_model.py
|
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
def build_test_model(input_shape=(32, 32, 3), l2_reg=None):
do = 0.0
regularizer = l2(l2_reg) if l2_reg is not None else None
model = keras.Sequential()
model.add(layers.Conv2D(filters=6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer, input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(units=120, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
model.add(layers.Dense(units=10, activation='softmax'))
return model
| 848 | 37.590909 | 220 |
py
|
fl-analysis
|
fl-analysis-master/src/model/mobilenet.py
|
# Implementation by https://github.com/ruchi15/CNN-MobileNetV2-Cifar10
import tensorflow as tf
import os
import warnings
import numpy as np
from tensorflow.keras.layers import Input, Activation, Conv2D, Dense, Dropout, BatchNormalization, ReLU, \
DepthwiseConv2D, GlobalAveragePooling2D, GlobalMaxPooling2D, Add
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers  # keep regularizers consistent with the tf.keras layers above
# define the filter size
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
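# Worked examples of the rounding above (divisor=8, as used below with alpha=0.5):
#   _make_divisible(32 * 0.5, 8) -> 16  (16 is already a multiple of 8)
#   _make_divisible(24 * 0.5, 8) -> 16  (12 rounds up to the nearest multiple of 8)
#   _make_divisible(16 * 0.5, 8) -> 8
# The "new_v < 0.9 * v" check guarantees the result never drops below 90% of v.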
# define the calculation of each 'inverted Res_Block'
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
prefix = 'block_{}_'.format(block_id)
in_channels = inputs.shape[-1]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
# Expand
if block_id:
x = Conv2D(expansion * in_channels, kernel_size=1, strides=1, padding='same', use_bias=False, activation=None,
kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x)
x = ReLU(6., name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same',
kernel_initializer="he_normal", depthwise_regularizer=regularizers.l2(4e-5),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x)
x = ReLU(6., name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters, kernel_size=1, strides=1, padding='same', use_bias=False, activation=None,
kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)
if in_channels == pointwise_filters and stride == 1:
return Add(name=prefix + 'add')([inputs, x])
return x
# Create Build
def create_model(rows, cols, channels):
# encoder - input
alpha = 0.5
include_top = True
model_input = tf.keras.Input(shape=(rows, cols, channels), name='input_image')
x = model_input
first_block_filters = _make_divisible(32 * alpha, 8)
    # model architecture
x = Conv2D(first_block_filters, kernel_size=3, strides=1, padding='same', use_bias=False,
kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name='Conv1')(model_input)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=1)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9)
x = Dropout(rate=0.25)(x)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12)
x = Dropout(rate=0.25)(x)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=13)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15)
x = Dropout(rate=0.25)(x)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16)
x = Dropout(rate=0.25)(x)
# define filter size (last block)
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = Conv2D(last_block_filters, kernel_size=1, use_bias=False, kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(4e-5), name='Conv_1')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x)
x = ReLU(6., name='out_relu')(x)
if include_top:
x = GlobalAveragePooling2D(name='global_average_pool')(x)
x = Dense(10, activation='softmax', use_bias=True, name='Logits')(x)
else:
pass
# if pooling == 'avg':
# x = GlobalAveragePooling2D()(x)
# elif pooling == 'max':
# x = GlobalMaxPooling2D()(x)
# create model of MobileNetV2 (for CIFAR-10)
model = Model(inputs=model_input, outputs=x, name='mobilenetv2_cifar10')
# model.compile(optimizer=tf.keras.optimizers.Adam(lr_initial), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# model.compile(optimizer=tf.keras.optimizers.Adam(lr_initial), loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
return model
def mobilenetv2_cifar10():
# model = create_model(32, 32, 3)
# # model.summary()
#
# return model
# inputs = tf.keras.Input(shape=(32, 32, 3))
# resize_layer = tf.keras.layers.Lambda(
# lambda image: tf.image.resize(
# image,
# (224, 224),
# method=tf.image.ResizeMethod.BICUBIC,
# preserve_aspect_ratio=True
# )
# , input_shape=(32, 32, 3))(inputs)
return tf.keras.applications.mobilenet_v2.MobileNetV2(
input_shape=(32, 32, 3), alpha=0.5,
include_top=True, weights=None, input_tensor=None, pooling=None,
classes=10
)
| 6,395 | 41.357616 | 129 |
py
|
fl-analysis
|
fl-analysis-master/src/model/stacked_lstm.py
|
import tensorflow as tf
# class StackedLSTM(tf.keras.Model):
# def __init__(self, vocab_size, embedding_dim, n_hidden):
# super().__init__(self)
# self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
#
# rnn_cells = [tf.keras.layers.LSTMCell(n_hidden) for _ in range(2)]
# stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
# self.lstm_layer = tf.keras.layers.RNN(stacked_lstm)
#
# self.dense = tf.keras.layers.Dense(vocab_size)
#
# def call(self, inputs, states=None, return_state=False, training=False):
# x = inputs
# x = self.embedding(x, training=training)
# if states is None:
# states = self.lstm_layer.get_initial_state(x)
# x, states = self.lstm_layer(x, initial_state=states, training=training)
# x = self.dense(x, training=training)
#
# if return_state:
# return x, states
# else:
# return x
#
#
#
# def build_stacked_lstm():
# model = StackedLSTM(80, 8, 256)
# model.call(tf.keras.layers.Input(shape=(80), name="test_prefix"))
# # model.build(input_shape=(None, 80))
# model.summary()
# return model
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras import Sequential
def build_stacked_lstm():
vocab_size, embedding_dim, n_hidden = 80, 8, 256
model = Sequential()
model.add(Embedding(vocab_size, embedding_dim))
# rnn_cells = [tf.keras.layers.LSTMCell(n_hidden) for _ in range(2)]
# stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
# lstm_layer = tf.keras.layers.RNN(stacked_lstm)
model.add(LSTM(n_hidden, return_sequences=True))
model.add(LSTM(n_hidden, return_sequences=False))
model.add(Dense(vocab_size, activation='softmax'))
return model
| 1,762 | 31.054545 | 77 |
py
|
fl-analysis
|
fl-analysis-master/src/error/__init__.py
|
class ConfigurationError(Exception):
pass
| 47 | 11 | 36 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/constants.py
|
"""Constants for configuring us_hep_funding"""
import pathlib
RAW_DATA_PATH = pathlib.Path("/workspaces/us_hep_funding/raw_data/")
CLEANED_DBS_PATH = pathlib.Path("/workspaces/us_hep_funding/cleaned_data/")
USASPENDING_BASEURL = "https://files.usaspending.gov/award_data_archive/"
DOE_CONTRACTS_STR = "_089_Contracts_Full_20220208"
NSF_GRANTS_STR = "_049_Assistance_Full_20220208"
# have to be explicit about these since DOE changes the file names in some years.
DOE_GRANTS_URLS = {
2012: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2012.xlsx",
2013: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2013.xlsx",
2014: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2014.xlsx",
2015: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2015.xlsx",
2016: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_grants_FY2016.xlsx",
2017: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_grants_FY2017.xlsx",
2018: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2018.xlsx",
2019: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2019.xlsx",
2020: "https://science.osti.gov/-/media/_/excel/universities/DOE-SC_Grants_FY2020.xlsx",
2021: "https://science.osti.gov/-/media/_/excel/universities/SC-in-Your-State-FY-2021.xlsx",
}
SULI_STUDENT_URLS = {
2014: "https://science.osti.gov/-/media/wdts/suli/pdf/2014-SULI-Terms_Participant-Report.pdf",
2015: "https://science.osti.gov/-/media/wdts/suli/pdf/2015-SULI-Terms_Participant-Report.pdf",
2016: "https://science.osti.gov/-/media/wdts/suli/pdf/2016-SULI-Terms_Participant-Report.pdf",
2017: "https://science.osti.gov/-/media/wdts/suli/pdf/SULI-participants-2017.pdf",
2018: "https://science.osti.gov/-/media/wdts/suli/pdf/SULI-participants-2018_a.pdf",
2019: "https://science.osti.gov/-/media/wdts/suli/pdf/SULI-participants-2019.pdf",
2020: "https://science.osti.gov/-/media/wdts/suli/pdf/2020-SULI-participants.pdf",
}
SC_CONTRACTS_OFFICES = [
"CHICAGO SERVICE CENTER (OFFICE OF SCIENCE)",
"OAK RIDGE OFFICE (OFFICE OF SCIENCE)",
"SCIENCE",
"SC OAK RIDGE OFFICE",
"SC CHICAGO SERVICE CENTER",
]
| 2,291 | 53.571429 | 98 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/__init__.py
| 0 | 0 | 0 |
py
|
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/create_doe_grants.py
|
import pandas as pd
from us_hep_funding.constants import CLEANED_DBS_PATH, RAW_DATA_PATH
from us_hep_funding.data.cleaners import DoeGrantsCleaner
def run():
doe_grants2012 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2012.xlsx",
2012,
sheet_name="DOE SC Awards FY 2012",
amount_key="2012 Funding",
program_office_key="SC Program",
project_title_key="Project Title",
pi_key="Principal Investigator(s)",
).run()
doe_grants2013 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2013.xlsx",
2013,
sheet_name="DOE SC Awards FY 2013",
skiprows=1,
district_key="Congressional District *",
amount_key="FY 2013 Funding",
program_office_key="SC Program",
project_title_key="Project Title",
pi_key="Principal Investigator(s)",
).run()
doe_grants2014 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2014.xlsx",
2014,
sheet_name="DOE SC Awards FY 2014",
project_title_key="Project Title",
).run()
doe_grants2015 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2015.xlsx",
2015,
sheet_name="DOE SC Awards FY 2015",
project_title_key="Project Title",
state_key="State/Territory",
).run()
doe_grants2016 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2016.xlsx",
2016,
state_key="State/Territory",
).run()
doe_grants2017 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2017.xlsx",
2017,
pi_key="Principlal Investigator",
).run()
doe_grants2018 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2018.xlsx",
2018,
program_office_key="Program Office",
pi_key="Principlal Investigator",
).run()
doe_grants2019 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2019.xlsx", 2019, pi_key="PI"
).run()
doe_grants2020 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "DOE-SC_Grants_FY2020.xlsx",
2020,
pi_key="PI",
).run()
doe_grants2021 = DoeGrantsCleaner(
RAW_DATA_PATH / "unzipped" / "SC-in-Your-State-FY-2021.xlsx", 2021, pi_key="PI"
).run()
merged_df = pd.concat(
[
doe_grants2012,
doe_grants2013,
doe_grants2014,
doe_grants2015,
doe_grants2016,
doe_grants2017,
doe_grants2018,
doe_grants2019,
doe_grants2020,
doe_grants2021,
]
)
merged_df.to_csv(CLEANED_DBS_PATH / "doe_grants.csv")
if __name__ == "__main__":
run()
| 2,809 | 27.383838 | 87 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/ad_hoc.py
|
# Ad-hoc, one-off fixes applied to intermediate dataframes; the frames
# (data, data0, data2, data3, data4, dataA) are assumed to be loaded elsewhere.
import numpy as np
import pandas as pd
data2.loc[
    data2["Institution"] == "University of Minnesota", "Congressional District"
] = "MN-05"
data4.loc[
data4["Institution"] == "CALIFORNIA INST. OF TECHNOLOGY",
"Congressional District *",
] = "CA-27"
data3.loc[
data3["Institution"] == "California Institute of Technology (CalTech)",
"Congressional District",
] = "CA-27"
data2.loc[
data2["Institution"] == "California Institute of Technology",
"Congressional District",
] = "CA-27"
data.loc[
data["Institution"] == "California Institute of Technology",
"Congressional District",
] = "CA-27"
data0.loc[
data0["Institution"] == "California Institute of Technology",
"Congressional District",
] = "CA-27"
dataA.loc[
dataA["Institution"] == "California Institute of Technology",
"Congressional District",
] = "CA-27"
# suli:
data = data.append(
pd.DataFrame(
np.array(
[
["Reed Bowles"],
["Wichita State University"],
["Fermi National Accelerator Laboratory"],
["Summer 2017"],
]
).T,
columns=["Name", "College", "Host Lab", "Term"],
),
ignore_index=True,
)
| 1,189 | 26.045455 | 79 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/create_suli_data.py
|
import pandas as pd
from us_hep_funding.constants import CLEANED_DBS_PATH
from us_hep_funding.data.cleaners import SuliStudentDataCleaner
from us_hep_funding.mapping import SuliStudentMapMaker
def run():
suli2014 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/2014-SULI-Terms_Participant-Report.pdf",
2014,
{
0: "Name",
1: "Institution",
2: "Host Lab",
3: "Term",
},
).run()
suli2015 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/2015-SULI-Terms_Participant-Report.pdf",
2015,
{0: "Name", 1: "Institution", 2: "Host Lab", 3: "Term"},
).run()
suli2016 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/2016-SULI-Terms_Participant-Report.pdf",
2016,
{
0: "Name",
1: "Institution",
2: "Host Lab",
3: "Term",
},
).run()
suli2017 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/SULI-participants-2017.pdf",
2017,
{
0: "Name",
1: "Institution",
2: "Host Lab",
3: "Season",
4: "Year",
},
).run()
suli2018 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/SULI-participants-2018_a.pdf",
2018,
{
0: "Name",
1: "Institution",
2: "Host Lab",
3: "Season",
4: "Year",
},
).run()
suli2019 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/SULI-participants-2019.pdf",
2019,
{
0: "Name",
1: "Institution",
2: "Host Lab",
3: "Season",
4: "Year",
},
).run()
suli2020 = SuliStudentDataCleaner(
"/workspaces/us_hep_funding/raw_data/unzipped/2020-SULI-participants.pdf",
2020,
{
0: "Term",
1: "First Name",
2: "Last Name",
3: "Institution",
4: "Host Lab",
},
).run()
merged = pd.concat(
[suli2014, suli2015, suli2016, suli2017, suli2018, suli2019, suli2020],
ignore_index=True,
)
merged.to_csv(CLEANED_DBS_PATH / "suli_students.csv")
def geocode():
df = pd.read_csv(CLEANED_DBS_PATH / "suli_students.csv")
geocodes = pd.read_csv(CLEANED_DBS_PATH / "geocodes.csv")
merged = df.merge(geocodes, left_on="Institution", right_on="College", how="inner")
natlabs = pd.read_csv(CLEANED_DBS_PATH / "national_labs_geocodio.csv")
natlabs = natlabs[["Lab", "City", "Latitude", "Longitude"]].dropna()
natlabs = natlabs.rename(
columns={
"Lab": "Host Lab",
"City": "Lab City",
"Latitude": "Lab Latitude",
"Longitude": "Lab Longitude",
}
)
geo_students = merged.merge(natlabs, on="Host Lab")
print(len(merged), len(geo_students))
geo_students.to_csv(CLEANED_DBS_PATH / "suli_students_geocoded.csv")
def make_maps():
import us
mapper = SuliStudentMapMaker()
for state in us.states.STATES:
mapper.plot_suli_state_formal(state.abbr)
if __name__ == "__main__":
make_maps()
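# Full-pipeline sketch (an assumption, since the __main__ guard above only
# calls make_maps(): run() and geocode() are presumed to have been run first).
def run_all():
    run()        # writes suli_students.csv
    geocode()    # writes suli_students_geocoded.csv and prints row counts
    make_maps()  # renders one formal SULI map per U.S. state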
| 3,362 | 25.480315 | 94 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/__init__.py
| 0 | 0 | 0 |
py
|
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/create_databases.py
|
"""This will be the top-level API for producing updated
data tables."""
from datetime import datetime
from us_hep_funding.data.downloaders import (
UsaSpendingDataDownloader,
DoeDataDownloader,
SuliStudentDataDownloader,
)
from us_hep_funding.data.cleaners import DoeContractDataCleaner, NsfGrantsCleaner
# 2011 is as far as usaspending data goes back.
# DOE grants go back to 2012.
YEARS_OF_INTEREST = range(2014, datetime.now().year + 1)
# usa_spending_downloader = UsaSpendingDataDownloader()
# doe_downloader = DoeDataDownloader()
suli_downloader = SuliStudentDataDownloader()
for fiscal_year in YEARS_OF_INTEREST:
# usa_spending_downloader.run(fiscal_year)
# doe_downloader.run(fiscal_year)
suli_downloader.run(fiscal_year)
# doe_contract_cleaner = DoeContractDataCleaner()
# doe_contract_cleaner.run()
# nsf_grants_cleaner = NsfGrantsCleaner()
# nsf_grants_cleaner.run()
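# Sketch of the full refresh path (an assumption: it simply re-enables the
# downloader and cleaner calls that are commented out above).
def refresh_all():
    usa_spending_downloader = UsaSpendingDataDownloader()
    doe_downloader = DoeDataDownloader()
    for year in YEARS_OF_INTEREST:
        usa_spending_downloader.run(year)
        doe_downloader.run(year)
    DoeContractDataCleaner().run()
    NsfGrantsCleaner().run()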
| 919 | 29.666667 | 81 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/clients/create_maps.py
|
"""This will be the top-level API for creating maps
of SULI/CCI/VFP data."""
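# Minimal sketch of what this stub is expected to contain, mirroring
# make_maps() in create_suli_data.py; SuliStudentMapMaker and the `us` package
# are assumptions carried over from that module, not part of this file yet.
import us
from us_hep_funding.mapping import SuliStudentMapMaker
def run():
    mapper = SuliStudentMapMaker()
    for state in us.states.STATES:
        mapper.plot_suli_state_formal(state.abbr)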
| 78 | 25.333333 | 52 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/__init__.py
| 0 | 0 | 0 |
py
|
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/cleaners/_nsf_grants_cleaner.py
|
import pandas as pd
from us_hep_funding.constants import RAW_DATA_PATH, CLEANED_DBS_PATH
class NsfGrantsCleaner:
def __init__(self):
self.contract_file_list = (RAW_DATA_PATH / "unzipped").glob(
"*049_Assistance*.csv"
)
def _load_data(self):
contract_df_list = []
for contract_file in self.contract_file_list:
df = pd.read_csv(contract_file)
df["Year"] = contract_file.stem[2:6]
df = df[
df["cfda_title"].map(str.strip).map(str.lower)
== "mathematical and physical sciences"
]
contract_df_list.append(df)
return pd.concat(contract_df_list, ignore_index=True)
def _clean_data(self, mps_grants: pd.DataFrame):
mps_grants = mps_grants[
[
"Year",
"cfda_title",
"federal_action_obligation",
"recipient_state_code",
"recipient_congressional_district",
"recipient_name",
]
]
mps_grants = mps_grants.rename(
columns={
"federal_action_obligation": "Amount ($)",
"recipient_state_code": "State",
"recipient_congressional_district": "District",
"recipient_name": "Institution",
}
)
mps_grants = mps_grants.dropna(subset=["District"])
mps_grants["District"] = mps_grants["State"] + mps_grants["District"].map(
int
).map(str).str.zfill(2)
mps_grants.loc[mps_grants["District"] == "OR00", "State"] = "PR"
mps_grants.loc[mps_grants["District"] == "OR00", "District"] = "PR00"
mps_grants = mps_grants[mps_grants["Amount ($)"] > 0]
mps_grants["Amount ($)"] = mps_grants["Amount ($)"].round(0)
mps_insts = mps_grants["Institution"]
mps_insts = mps_insts.str.replace("THE ", "")
mps_insts = mps_insts.str.replace("REGENTS OF THE ", "")
mps_insts = mps_insts.str.replace("PRESIDENT AND FELLOWS OF ", "")
mps_insts = mps_insts.str.replace(r" \(THE\)", "")
mps_insts = mps_insts.str.replace("TRUSTEES OF ", "")
mps_insts = mps_insts.str.replace(", THE$", "")
mps_insts = mps_insts.str.replace(", INC$", "")
mps_insts = mps_insts.str.replace(", INC.$", "")
mps_insts = mps_insts.str.replace(r" \(INC\)$", "")
mps_insts = mps_insts.str.replace("FDN$", "Foundation")
mps_insts = mps_insts.str.replace("ASTRON$", "Astronomy")
mps_insts = mps_insts.str.replace(r" \(THE\)$", "")
mps_insts = mps_insts.str.replace(" INST ", " INSTITUTE ")
mps_insts = mps_insts.str.replace("TECH$", "TECHNOLOGY")
mps_insts = mps_insts.str.replace(",", "")
mps_insts = mps_insts.str.replace(" INC$", "")
mps_insts = mps_insts.str.replace("UNIV$", "UNIVERSITY")
mps_insts = mps_insts.str.replace(
"MIT$", "MASSACHUSETTS INSTITUTE OF TECHNOLOGY"
)
mps_insts = mps_insts.str.replace(r" \(INC.\)$", "")
mps_insts = mps_insts.str.replace(r"\d+$", "")
mps_insts = mps_insts.str.replace("^U ", "UNIVERSITY ")
mps_insts = mps_insts.str.replace(" CAL ", " CALIFORNIA ")
mps_insts = mps_insts.str.replace("UNIVERSTIY", "UNIVERSITY")
mps_insts = mps_insts.str.replace("UNIVER$", "UNIVERSITY")
mps_insts = mps_insts.str.strip()
mps_insts = mps_insts.str.title()
mps_insts = mps_insts.str.replace("Csu", "CSU")
mps_insts = mps_insts.str.replace("'S", "'s")
mps_grants["Institution"] = mps_insts
return mps_grants
def run(self):
mps_grants = self._load_data()
cleaned_df = self._clean_data(mps_grants)
cleaned_df.to_csv(CLEANED_DBS_PATH / "test_nsf_grants.csv", index=False)
| 3,888 | 39.092784 | 82 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/cleaners/_doe_contracts_cleaner.py
|
import pandas as pd
from us_hep_funding.constants import (
CLEANED_DBS_PATH,
RAW_DATA_PATH,
SC_CONTRACTS_OFFICES,
)
class DoeContractDataCleaner:
def __init__(self):
self.contract_file_list = (RAW_DATA_PATH / "unzipped").glob(
"*089_Contracts*.csv"
)
def _load_data(self):
contract_df_list = []
for contract_file in self.contract_file_list:
df = pd.read_csv(contract_file)
df["Year"] = contract_file.stem[2:6]
df = df[
(df["awarding_office_name"].isin(SC_CONTRACTS_OFFICES))
| (df["funding_office_name"].isin(SC_CONTRACTS_OFFICES))
]
contract_df_list.append(df)
return pd.concat(contract_df_list, ignore_index=True)
def _clean_data(self, sc_contracts: pd.DataFrame):
sc_contracts = sc_contracts[
[
"award_id_piid",
"federal_action_obligation",
"recipient_name",
"primary_place_of_performance_state_code",
"primary_place_of_performance_congressional_district",
"product_or_service_code_description",
"Year",
]
]
sc_contracts = sc_contracts.rename(
columns={
"federal_action_obligation": "Amount ($)",
"award_id_piid": "award_id",
"recipient_name": "Vendor",
"primary_place_of_performance_state_code": "State",
"primary_place_of_performance_congressional_district": "District",
"product_or_service_code_description": "Item",
}
)
sc_contracts = sc_contracts.dropna(subset=["District"])
sc_contracts["District"] = sc_contracts["State"] + sc_contracts["District"].map(
int
).map(str).str.zfill(2)
sc_contracts = sc_contracts[sc_contracts["Amount ($)"] > 0]
sc_contracts["Amount ($)"] = sc_contracts["Amount ($)"].round(0)
vendors = sc_contracts["Vendor"]
vendors = vendors.str.title()
vendors = vendors.str.replace("'S", "'s")
vendors = vendors.str.replace(" Limited Liability Company", ", LLC")
vendors = vendors.str.replace("Llc", "LLC")
vendors = vendors.str.replace("Incorporated", "Inc.")
vendors = vendors.str.replace("It", "IT")
vendors = vendors.str.replace("Pc", "PC")
sc_contracts["Vendor"] = vendors
items = sc_contracts["Item"]
items = items.str.title()
items = items.str.replace("Oper ", "Operation ")
items = items.str.replace("Goco", "GOCO")
items = items.str.replace("Gogo", "GOGO")
items = items.str.replace("It", "IT")
items = items.str.replace("Adpe", "ADPE")
items = items.str.replace("Adp", "ADP")
items = items.str.replace("Cpu", "CPU")
sc_contracts["Item"] = items
return sc_contracts
def run(self):
sc_contracts = self._load_data()
cleaned_df = self._clean_data(sc_contracts)
cleaned_df.to_csv(CLEANED_DBS_PATH / "test_contracts.csv", index=False)
| 3,175 | 35.505747 | 88 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/cleaners/_suli_cci_cleaner.py
|
import camelot
import pandas as pd
_LAB_ABBRS_TO_NAMES = {
"LBNL": "Lawrence Berkeley National Laboratory",
"BNL": "Brookhaven National Laboratory",
"ANL": "Argonne National Laboratory",
"ORNL": "Oak Ridge National Laboratory",
"NREL": "National Renewable Energy Laboratory",
"PNNL": "Pacific Northwest National Laboratory",
"LANL": "Los Alamos National Laboratory",
"LLNL": "Lawrence Livermore National Laboratory",
"AMES": "Ames National Laboratory",
"INL": "Idaho National Laboratory",
"PPPL": "Princeton Plasma Physics Laboratory",
"SLAC": "SLAC National Accelerator Laboratory",
"FNAL": "Fermi National Accelerator Laboratory",
"TJNAF": "Thomas Jefferson National Accelerator Facility",
}
_NON_HEP_LABS = [
"GA / DIII-D",
"SNL NM",
"SNL CA",
"DOE Naval Reactors",
"General Atomics / DIII-D",
"Sandia National Laboratory",
]
class SuliStudentDataCleaner:
def __init__(self, filepath, fiscal_year, column_remapper):
self.fiscal_year = fiscal_year
tables = camelot.read_pdf(filepath, flavor="stream", pages="all")
for i, page in enumerate(tables):
if i == 0:
self.df = page.df
else:
self.df = pd.concat([self.df, page.df])
self.df = self.df.rename(
column_remapper,
axis=1,
)
def _unify_formatting(self):
if "First Name" in self.df.columns:
self.df["Name"] = self.df["First Name"] + " " + self.df["Last Name"]
del self.df["First Name"], self.df["Last Name"]
if "Season" in self.df.columns:
self.df["Term"] = self.df["Season"] + " " + self.df["Year"]
del self.df["Season"], self.df["Year"]
def run(self):
# delete rows with blanks which are fake table rows added by PDF reader
self.df.replace("", float("NaN"), inplace=True)
self.df.dropna(inplace=True)
self.df.drop_duplicates(inplace=True, keep=False)
self._unify_formatting()
self.df["Institution"] = self.df["Institution"].str.normalize("NFKD")
self.df["Host Lab"] = self.df["Host Lab"].str.normalize("NFKD")
# delete trailing parenthetical lab name abbreviations
self.df["Host Lab"] = (
self.df["Host Lab"].str.replace("\(\w*\)$", "").str.rstrip()
)
# drop students from obviously non-HEP labs
self.df = self.df[~self.df["Host Lab"].isin(_NON_HEP_LABS)]
# some years list only lab abbreviations rather than names
if self.df["Host Lab"].isin(_LAB_ABBRS_TO_NAMES.keys()).all():
self.df["Host Lab"] = self.df["Host Lab"].map(_LAB_ABBRS_TO_NAMES)
self.df["Program"] = "SULI"
self.df["Institution"] = self.df["Institution"].str.replace(
"Stony Brook University", "State University of New York at Stony Brook"
)
return self.df
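# Usage sketch, mirroring a call made in create_suli_data.py: the third
# argument maps the positional columns camelot extracts from a given year's
# PDF onto the names this cleaner expects ("Term" for 2014-2016,
# "Season"/"Year" for 2017-2019, "First Name"/"Last Name"/"Term" for 2020).
if __name__ == "__main__":
    cleaned = SuliStudentDataCleaner(
        "/workspaces/us_hep_funding/raw_data/unzipped/2016-SULI-Terms_Participant-Report.pdf",
        2016,
        {0: "Name", 1: "Institution", 2: "Host Lab", 3: "Term"},
    ).run()
    print(cleaned[["Name", "Institution", "Host Lab", "Term"]].head())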
| 2,950 | 33.313953 | 83 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/cleaners/__init__.py
|
from ._doe_contracts_cleaner import DoeContractDataCleaner
from ._nsf_grants_cleaner import NsfGrantsCleaner
from ._doe_grants_cleaner import DoeGrantsCleaner
from ._suli_cci_cleaner import SuliStudentDataCleaner
| 213 | 41.8 | 58 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/cleaners/_doe_grants_cleaner.py
|
import re
import numpy as np
import pandas as pd
from titlecase import titlecase
class DoeGrantsCleaner:
def __init__(
self,
filepath,
fiscal_year,
sheet_name=0,
skiprows=0,
institution_key="Institution",
district_key="Congressional District",
amount_key="Awarded Amount",
state_key="State",
program_office_key="Organization",
project_title_key="Title",
award_number_key="Award Number",
pi_key="Principal Investigator",
):
self.data = pd.read_excel(filepath, sheet_name=sheet_name, skiprows=skiprows)
# set weird column headings to uniform ones
column_heading_remapper = {
program_office_key: "SC Office",
state_key: "State",
district_key: "District",
institution_key: "Institution",
amount_key: "Amount ($)",
project_title_key: "Project Title",
pi_key: "Principal Investigator",
award_number_key: "Award Number",
}
self.data.rename(columns=column_heading_remapper, inplace=True)
# only keep relevant columns
self.data = self.data[column_heading_remapper.values()]
self.data["Year"] = fiscal_year * np.ones(len(self.data), dtype=int)
print(fiscal_year)
def run(self):
hepdata = self._get_hep_grants()
return self._clean_data(hepdata)
def _get_hep_grants(self):
agencies = self.data["SC Office"].values
abbrev_agencies = []
for entry in list(agencies):
test = re.split(r"\(|\)", str(entry))
if len(test) > 1:
abbrev_agencies.append(test[1])
else:
abbrev_agencies.append(entry)
self.data["SC Office"] = abbrev_agencies
return self.data[
(self.data["SC Office"] == "HEP")
| (self.data["SC Office"] == "High Energy Physics")
]
@staticmethod
def _clean_data(hepdata):
# strip out whitespace
hepdata["State"] = hepdata["State"].map(str).map(str.strip)
hepdata = hepdata.dropna(subset=["Amount ($)"])
hepdata["Project Title"].replace("‐", "-", inplace=True)
# unicode problems in the raw data
hepdata["Project Title"].loc[
1675
] = "High Energy Physics - Energy, Intensity, Theoretical Frontier"
hepdata["Project Title"].loc[
4357
] = "High Energy Physics - Energy, Intensity, Theoretical Frontier"
# clean up institute names
insts = hepdata["Institution"]
insts = insts.str.strip()
insts = insts.map(titlecase)
insts = insts.str.replace(" At ", " - ")
insts = insts.str.replace(", ", " - ")
insts = insts.str.replace("U. ", "University ")
insts = insts.str.replace("Inst. ", "Institute ")
insts = insts.str.replace("Cuny", "CUNY")
insts = insts.str.replace("Suny", "SUNY")
insts = insts.str.replace(
"University Of Illinois - Urbana-Champain",
"University Of Illinois - Urbana-Champaign",
)
insts = insts.str.replace("Llc", "LLC")
insts = insts.str.replace("Ieee", "IEEE")
insts = insts.str.replace("Mit", "MIT")
insts = insts.str.replace(
"City College Of New York \(CUNY\) - Queens College",
"CUNY - Queens College",
)
insts = insts.str.replace(
"State University Of New York \(SUNY\) - Albany", "SUNY - Albany"
)
insts = insts.str.replace(
"Virginia Polytechnic Institute And State University \(Virginia Tech\)",
"Virginia Tech University",
)
insts = insts.str.replace(
"Virginia Polytechnic Institute And State University",
"Virginia Tech University",
)
insts = insts.str.replace(
"Virginia Tech \(Virginia Tech\)", "Virginia Tech University"
)
insts = insts.str.replace("Univ\.", "University")
insts = insts.str.replace(
"State University Of New York - Stony Brook", "SUNY - Stony Brook"
)
insts = insts.str.replace(
"City University Of New York - York College", "CUNY - York College"
)
insts = insts.str.replace(
"State University Of New York - Albany", "SUNY - Albany"
)
insts = insts.str.replace("Virginia - University Of", "University of Virginia")
insts = insts.str.replace(
"College Ofwilliam And Mary", "College Of William And Mary"
)
insts = insts.str.replace(
"California Institute Of Technology \(Caltech\)",
"California Institute Of Technology",
)
insts = insts.str.replace("Harvard College", "Harvard University")
insts = insts.str.replace(
"Louisiana State University And A&M College", "Louisiana State University"
)
insts = insts.str.replace(
"Iowa State University Of Science And Technology", "Iowa State University"
)
insts = insts.str.replace(
"Massachusetts Institute Of Technology \(MIT\)",
"Massachusetts Institute Of Technology",
)
insts = insts.str.replace(
"Old Dominion University Research Foundation", "Old Dominion University"
)
insts = insts.str.replace(
"President And Fellows Of Harvard College", "Harvard University"
)
insts = insts.str.replace("SUNY - Stony Brook University", "SUNY - Stony Brook")
insts = insts.str.replace(
"Research Foundation Of The City University Of New York \(CUNY\)",
"CUNY Research Foundation",
)
insts = insts.str.replace(
"Rutgers University - New Brunswick", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers University, New Brunswick", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers - State University Of New Jersey - New Brunswick",
"Rutgers University",
)
insts = insts.str.replace(
"Rutgers - The State University Of New Jersey", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers - The State University Of New Jersey - New Brunswick",
"Rutgers University",
)
        # second pass: the "Rutgers - The State University Of New Jersey"
        # replacement above turns the "- New Brunswick" variant into
        # "Rutgers University - New Brunswick", which only the earlier patterns
        # catch, so they are re-applied here
insts = insts.str.replace(
"Rutgers University - New Brunswick", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers University, New Brunswick", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers - State University Of New Jersey - New Brunswick",
"Rutgers University",
)
insts = insts.str.replace(
"Rutgers - The State University Of New Jersey", "Rutgers University"
)
insts = insts.str.replace(
"Rutgers - The State University Of New Jersey - New Brunswick",
"Rutgers University",
)
#
insts = insts.str.replace(
"Smithsonian Institute - Smithsonian Astrophysical Observatory",
"Smithsonian Astrophysical Observatory",
)
insts = insts.str.replace(
"Smithsonian Institute /Smithsonian Astrophysical Observatory",
"Smithsonian Astrophysical Observatory",
)
insts = insts.str.replace(
"Texas A&M Research Foundation", "Texas A&M University"
)
insts = insts.str.replace(
"Texas A&M University - College Station", "Texas A&M University"
)
insts = insts.str.replace(
"Texas A&M University, College Station", "Texas A&M University"
)
insts = insts.str.replace("University - Albany \(SUNY\)", "SUNY - Albany")
insts = insts.str.replace("SUNY - University - Albany", "SUNY - Albany")
insts = insts.str.replace("SUNY - University Of Albany", "SUNY - Albany")
insts = insts.str.replace("Brandies University", "Brandeis University")
insts = insts.str.replace(
"University Of Notre Dame Du Lac", "University Of Notre Dame"
)
insts = insts.str.replace(
"University Of Washington - Seattle", "University Of Washington"
)
insts = insts.str.replace(
"Stony Brook University \(SUNY\)", "SUNY - Stony Brook"
)
insts = insts.str.replace(
"State University Of New York \(SUNY\) - Albany", "SUNY - Albany"
)
insts = insts.str.replace("York College \(CUNY\)", "CUNY - York College")
insts = insts.str.replace("William Marsh Rice University", "Rice University")
insts = insts.str.replace(
"Michigan Technological University", "Michigan Tech. University"
)
insts = insts.str.replace(
"President And Fellows Of Harvard University", "Harvard University"
)
insts = insts.str.replace(
"University Of Tennessee - Knoxville", "University Of Tennessee"
)
insts = insts.str.replace(
"Indiana University - Bloomington", "Indiana University"
)
insts = insts.str.replace(
"University Of Alabama - Tuscaloosa", "University Of Alabama"
)
insts = insts.str.replace(
"State University Of New York \(SUNY\) - Stony Brook", "SUNY - Stony Brook"
)
insts = insts.str.replace(
"Rensselaer Polytechnic Institute", "Rensselaer Polytechnic Inst."
)
insts = insts.str.replace(
"University Of Texas - Arlington", "University Of Texas - Arlington"
)
insts = insts.str.replace(
"Virginia Polytechnic Institute", "Virginia Tech University"
)
insts = insts.str.replace(" Of ", " of ")
insts = insts.str.replace(" In ", " in ")
hepdata["Institution"] = insts
return hepdata
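# Usage sketch (illustrative only: the spreadsheet filename below is
# hypothetical, and the keyword arguments exist because DOE column headings
# and sheet layout change from fiscal year to fiscal year).
if __name__ == "__main__":
    fy_grants = DoeGrantsCleaner(
        "raw_data/unzipped/doe_sc_grant_awards_fy2021.xlsx",  # hypothetical path
        2021,
        skiprows=0,
    ).run()
    fy_grants.to_csv("doe_grants_fy2021.csv", index=False)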
| 10,169 | 37.089888 | 88 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/downloaders/_doe_downloader.py
|
import requests
import warnings
from us_hep_funding.constants import RAW_DATA_PATH, DOE_GRANTS_URLS
class DoeDataDownloader:
def __init__(self):
self.save_path = RAW_DATA_PATH / "unzipped"
def run(self, fiscal_year: int):
try:
url = DOE_GRANTS_URLS[fiscal_year]
except KeyError:
print("Could not find key {0} in dict DOE_GRANTS_URLS".format(fiscal_year))
return
filename = url.split("/")[-1]
if (self.save_path / filename).exists():
return
try:
# suppress https warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = requests.get(url, allow_redirects=True, verify=False)
r.raise_for_status()
        except requests.exceptions.RequestException:
print("could not find", url)
return
zip_file_path = self.save_path / filename
with (zip_file_path).open("wb+") as f:
f.write(r.content)
print("Data download for fiscal year {0} complete".format(fiscal_year))
| 1,107 | 26.02439 | 87 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/downloaders/_usa_spending_downloader.py
|
"""Classes that download data from usaspending.gov"""
import warnings
import zipfile
import requests
from us_hep_funding.constants import (
DOE_CONTRACTS_STR,
NSF_GRANTS_STR,
RAW_DATA_PATH,
USASPENDING_BASEURL,
)
class UsaSpendingDataDownloader:
"""A downloader for getting data from usaspending.gov"""
def __init__(self):
self.base_url = USASPENDING_BASEURL
self.save_path = RAW_DATA_PATH / "zips"
self.unzip_path = RAW_DATA_PATH / "unzipped"
def _run(self, fiscal_year: int, filestr: str):
url = self.base_url + "FY" + str(fiscal_year) + filestr + ".zip"
filename = url.split("/")[-1]
if (self.save_path / filename).exists():
return
try:
# suppress https warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = requests.get(url, allow_redirects=True, verify=False)
r.raise_for_status()
        except requests.exceptions.RequestException:
print("could not find", url)
return
zip_file_path = self.save_path / filename
with (zip_file_path).open("wb+") as f:
f.write(r.content)
zipper = zipfile.ZipFile(zip_file_path, "r")
zipper.extractall(path=str(self.unzip_path))
def run(self, fiscal_year: int):
self._run(fiscal_year, DOE_CONTRACTS_STR)
self._run(fiscal_year, NSF_GRANTS_STR)
print("Data download for fiscal year {0} complete".format(fiscal_year))
| 1,531 | 26.357143 | 79 |
py
|
us_hep_funding
|
us_hep_funding-main/us_hep_funding/data/downloaders/_suli_student_data.py
|
import requests
import warnings
from us_hep_funding.constants import RAW_DATA_PATH, SULI_STUDENT_URLS
class SuliStudentDataDownloader:
def __init__(self):
self.save_path = RAW_DATA_PATH / "unzipped"
def run(self, fiscal_year: int):
try:
url = SULI_STUDENT_URLS[fiscal_year]
except KeyError:
print("Could not find key {0} in dict DOE_GRANTS_URLS".format(fiscal_year))
return
filename = url.split("/")[-1]
if (self.save_path / filename).exists():
return
try:
# suppress https warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = requests.get(url, allow_redirects=True, verify=False)
r.raise_for_status()
        except requests.exceptions.RequestException:
print("could not find", url)
return
zip_file_path = self.save_path / filename
with (zip_file_path).open("wb+") as f:
f.write(r.content)
print("Data download for fiscal year {0} complete".format(fiscal_year))
| 1,119 | 26.317073 | 87 |
py
|